1*a1e26a70SApple OSS Distributions /*
2*a1e26a70SApple OSS Distributions * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3*a1e26a70SApple OSS Distributions *
4*a1e26a70SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*a1e26a70SApple OSS Distributions *
6*a1e26a70SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*a1e26a70SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*a1e26a70SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*a1e26a70SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*a1e26a70SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*a1e26a70SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*a1e26a70SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*a1e26a70SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*a1e26a70SApple OSS Distributions *
15*a1e26a70SApple OSS Distributions * Please obtain a copy of the License at
16*a1e26a70SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*a1e26a70SApple OSS Distributions *
18*a1e26a70SApple OSS Distributions * The Original Code and all software distributed under the License are
19*a1e26a70SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*a1e26a70SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*a1e26a70SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*a1e26a70SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*a1e26a70SApple OSS Distributions * Please see the License for the specific language governing rights and
24*a1e26a70SApple OSS Distributions * limitations under the License.
25*a1e26a70SApple OSS Distributions *
26*a1e26a70SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*a1e26a70SApple OSS Distributions */
28*a1e26a70SApple OSS Distributions /* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */
29*a1e26a70SApple OSS Distributions
30*a1e26a70SApple OSS Distributions #include <sys/cdefs.h>
31*a1e26a70SApple OSS Distributions
32*a1e26a70SApple OSS Distributions #include <kern/assert.h>
33*a1e26a70SApple OSS Distributions #include <kern/ast.h>
34*a1e26a70SApple OSS Distributions #include <kern/clock.h>
35*a1e26a70SApple OSS Distributions #include <kern/cpu_data.h>
36*a1e26a70SApple OSS Distributions #include <kern/kern_types.h>
37*a1e26a70SApple OSS Distributions #include <kern/policy_internal.h>
38*a1e26a70SApple OSS Distributions #include <kern/processor.h>
39*a1e26a70SApple OSS Distributions #include <kern/sched_prim.h> /* for thread_exception_return */
40*a1e26a70SApple OSS Distributions #include <kern/task.h>
41*a1e26a70SApple OSS Distributions #include <kern/thread.h>
42*a1e26a70SApple OSS Distributions #include <kern/thread_group.h>
43*a1e26a70SApple OSS Distributions #include <kern/zalloc.h>
44*a1e26a70SApple OSS Distributions #include <kern/work_interval.h>
45*a1e26a70SApple OSS Distributions #include <mach/kern_return.h>
46*a1e26a70SApple OSS Distributions #include <mach/mach_param.h>
47*a1e26a70SApple OSS Distributions #include <mach/mach_port.h>
48*a1e26a70SApple OSS Distributions #include <mach/mach_types.h>
49*a1e26a70SApple OSS Distributions #include <mach/mach_vm.h>
50*a1e26a70SApple OSS Distributions #include <mach/sync_policy.h>
51*a1e26a70SApple OSS Distributions #include <mach/task.h>
52*a1e26a70SApple OSS Distributions #include <mach/thread_act.h> /* for thread_resume */
53*a1e26a70SApple OSS Distributions #include <mach/thread_policy.h>
54*a1e26a70SApple OSS Distributions #include <mach/thread_status.h>
55*a1e26a70SApple OSS Distributions #include <mach/vm_prot.h>
56*a1e26a70SApple OSS Distributions #include <mach/vm_statistics.h>
57*a1e26a70SApple OSS Distributions #include <machine/atomic.h>
58*a1e26a70SApple OSS Distributions #include <machine/machine_routines.h>
59*a1e26a70SApple OSS Distributions #include <machine/smp.h>
60*a1e26a70SApple OSS Distributions #include <vm/vm_map.h>
61*a1e26a70SApple OSS Distributions #include <vm/vm_fault_xnu.h>
62*a1e26a70SApple OSS Distributions #include <vm/vm_protos.h>
63*a1e26a70SApple OSS Distributions
64*a1e26a70SApple OSS Distributions #include <sys/eventvar.h>
65*a1e26a70SApple OSS Distributions #include <sys/kdebug.h>
66*a1e26a70SApple OSS Distributions #include <sys/kernel.h>
67*a1e26a70SApple OSS Distributions #include <sys/lock.h>
68*a1e26a70SApple OSS Distributions #include <sys/param.h>
69*a1e26a70SApple OSS Distributions #include <sys/proc_info.h> /* for fill_procworkqueue */
70*a1e26a70SApple OSS Distributions #include <sys/proc_internal.h>
71*a1e26a70SApple OSS Distributions #include <sys/pthread_shims.h>
72*a1e26a70SApple OSS Distributions #include <sys/resourcevar.h>
73*a1e26a70SApple OSS Distributions #include <sys/signalvar.h>
74*a1e26a70SApple OSS Distributions #include <sys/sysctl.h>
75*a1e26a70SApple OSS Distributions #include <sys/sysproto.h>
76*a1e26a70SApple OSS Distributions #include <sys/systm.h>
77*a1e26a70SApple OSS Distributions #include <sys/ulock.h> /* for ulock_owner_value_to_port_name */
78*a1e26a70SApple OSS Distributions
79*a1e26a70SApple OSS Distributions #include <pthread/bsdthread_private.h>
80*a1e26a70SApple OSS Distributions #include <pthread/workqueue_syscalls.h>
81*a1e26a70SApple OSS Distributions #include <pthread/workqueue_internal.h>
82*a1e26a70SApple OSS Distributions #include <pthread/workqueue_trace.h>
83*a1e26a70SApple OSS Distributions
84*a1e26a70SApple OSS Distributions #include <os/log.h>
85*a1e26a70SApple OSS Distributions
86*a1e26a70SApple OSS Distributions static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;
87*a1e26a70SApple OSS Distributions
88*a1e26a70SApple OSS Distributions static void workq_bound_thread_unpark_continue(void *uth, wait_result_t wr) __dead2;
89*a1e26a70SApple OSS Distributions
90*a1e26a70SApple OSS Distributions static void workq_bound_thread_initialize_and_unpark_continue(void *uth, wait_result_t wr) __dead2;
91*a1e26a70SApple OSS Distributions
92*a1e26a70SApple OSS Distributions static void workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags) __dead2;
93*a1e26a70SApple OSS Distributions
94*a1e26a70SApple OSS Distributions static void workq_schedule_creator(proc_t p, struct workqueue *wq,
95*a1e26a70SApple OSS Distributions workq_kern_threadreq_flags_t flags);
96*a1e26a70SApple OSS Distributions
97*a1e26a70SApple OSS Distributions static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
98*a1e26a70SApple OSS Distributions workq_threadreq_t req);
99*a1e26a70SApple OSS Distributions
100*a1e26a70SApple OSS Distributions static uint32_t workq_constrained_allowance(struct workqueue *wq,
101*a1e26a70SApple OSS Distributions thread_qos_t at_qos, struct uthread *uth,
102*a1e26a70SApple OSS Distributions bool may_start_timer, bool record_failed_allowance);
103*a1e26a70SApple OSS Distributions
104*a1e26a70SApple OSS Distributions static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);
105*a1e26a70SApple OSS Distributions
106*a1e26a70SApple OSS Distributions static bool workq_thread_is_busy(uint64_t cur_ts,
107*a1e26a70SApple OSS Distributions _Atomic uint64_t *lastblocked_tsp);
108*a1e26a70SApple OSS Distributions
109*a1e26a70SApple OSS Distributions static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;
110*a1e26a70SApple OSS Distributions
111*a1e26a70SApple OSS Distributions static bool
112*a1e26a70SApple OSS Distributions workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);
113*a1e26a70SApple OSS Distributions
114*a1e26a70SApple OSS Distributions static inline void
115*a1e26a70SApple OSS Distributions workq_lock_spin(struct workqueue *wq);
116*a1e26a70SApple OSS Distributions
117*a1e26a70SApple OSS Distributions static inline void
118*a1e26a70SApple OSS Distributions workq_unlock(struct workqueue *wq);
119*a1e26a70SApple OSS Distributions
120*a1e26a70SApple OSS Distributions #pragma mark globals
121*a1e26a70SApple OSS Distributions
122*a1e26a70SApple OSS Distributions struct workq_usec_var {
123*a1e26a70SApple OSS Distributions uint32_t usecs;
124*a1e26a70SApple OSS Distributions uint64_t abstime;
125*a1e26a70SApple OSS Distributions };
126*a1e26a70SApple OSS Distributions
127*a1e26a70SApple OSS Distributions #define WORKQ_SYSCTL_USECS(var, init) \
128*a1e26a70SApple OSS Distributions static struct workq_usec_var var = { .usecs = init }; \
129*a1e26a70SApple OSS Distributions SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
130*a1e26a70SApple OSS Distributions CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
131*a1e26a70SApple OSS Distributions workq_sysctl_handle_usecs, "I", "")
132*a1e26a70SApple OSS Distributions
133*a1e26a70SApple OSS Distributions static LCK_GRP_DECLARE(workq_lck_grp, "workq");
134*a1e26a70SApple OSS Distributions os_refgrp_decl(static, workq_refgrp, "workq", NULL);
135*a1e26a70SApple OSS Distributions
136*a1e26a70SApple OSS Distributions static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
137*a1e26a70SApple OSS Distributions sizeof(struct workqueue), ZC_NONE);
138*a1e26a70SApple OSS Distributions static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
139*a1e26a70SApple OSS Distributions sizeof(struct workq_threadreq_s), ZC_CACHING);
140*a1e26a70SApple OSS Distributions
141*a1e26a70SApple OSS Distributions static struct mpsc_daemon_queue workq_deallocate_queue;
142*a1e26a70SApple OSS Distributions
143*a1e26a70SApple OSS Distributions WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
144*a1e26a70SApple OSS Distributions WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
145*a1e26a70SApple OSS Distributions WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
146*a1e26a70SApple OSS Distributions static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
147*a1e26a70SApple OSS Distributions static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
148*a1e26a70SApple OSS Distributions static uint32_t wq_init_constrained_limit = 1;
149*a1e26a70SApple OSS Distributions static uint16_t wq_death_max_load;
150*a1e26a70SApple OSS Distributions static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];
151*a1e26a70SApple OSS Distributions
152*a1e26a70SApple OSS Distributions /*
153*a1e26a70SApple OSS Distributions * This is not a hard limit but the max size we want to aim to hit across the
154*a1e26a70SApple OSS Distributions * entire cooperative pool. We can oversubscribe the pool due to non-cooperative
155*a1e26a70SApple OSS Distributions * workers and the max we will oversubscribe the pool by, is a total of
156*a1e26a70SApple OSS Distributions * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS.
157*a1e26a70SApple OSS Distributions */
158*a1e26a70SApple OSS Distributions static uint32_t wq_max_cooperative_threads;
159*a1e26a70SApple OSS Distributions
160*a1e26a70SApple OSS Distributions static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue * wq)161*a1e26a70SApple OSS Distributions wq_cooperative_queue_max_size(struct workqueue *wq)
162*a1e26a70SApple OSS Distributions {
163*a1e26a70SApple OSS Distributions return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
164*a1e26a70SApple OSS Distributions }
165*a1e26a70SApple OSS Distributions
166*a1e26a70SApple OSS Distributions #pragma mark sysctls
167*a1e26a70SApple OSS Distributions
168*a1e26a70SApple OSS Distributions static int
169*a1e26a70SApple OSS Distributions workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
170*a1e26a70SApple OSS Distributions {
171*a1e26a70SApple OSS Distributions #pragma unused(arg2)
172*a1e26a70SApple OSS Distributions struct workq_usec_var *v = arg1;
173*a1e26a70SApple OSS Distributions int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
174*a1e26a70SApple OSS Distributions if (error || !req->newptr) {
175*a1e26a70SApple OSS Distributions return error;
176*a1e26a70SApple OSS Distributions }
177*a1e26a70SApple OSS Distributions clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
178*a1e26a70SApple OSS Distributions &v->abstime);
179*a1e26a70SApple OSS Distributions return 0;
180*a1e26a70SApple OSS Distributions }
181*a1e26a70SApple OSS Distributions
182*a1e26a70SApple OSS Distributions SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
183*a1e26a70SApple OSS Distributions &wq_max_threads, 0, "");
184*a1e26a70SApple OSS Distributions
185*a1e26a70SApple OSS Distributions SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
186*a1e26a70SApple OSS Distributions &wq_max_constrained_threads, 0, "");
187*a1e26a70SApple OSS Distributions
188*a1e26a70SApple OSS Distributions static int
189*a1e26a70SApple OSS Distributions wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
190*a1e26a70SApple OSS Distributions {
191*a1e26a70SApple OSS Distributions #pragma unused(arg1, arg2, oidp)
192*a1e26a70SApple OSS Distributions int input_pool_size = 0;
193*a1e26a70SApple OSS Distributions int changed;
194*a1e26a70SApple OSS Distributions int error = 0;
195*a1e26a70SApple OSS Distributions
196*a1e26a70SApple OSS Distributions error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
197*a1e26a70SApple OSS Distributions if (error || !changed) {
198*a1e26a70SApple OSS Distributions return error;
199*a1e26a70SApple OSS Distributions }
200*a1e26a70SApple OSS Distributions
201*a1e26a70SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
202*a1e26a70SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
203*a1e26a70SApple OSS Distributions /* Not available currently, but sysctl interface is designed to allow these
204*a1e26a70SApple OSS Distributions * extra parameters:
205*a1e26a70SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all bucket)
206*a1e26a70SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
207*a1e26a70SApple OSS Distributions */
208*a1e26a70SApple OSS Distributions
209*a1e26a70SApple OSS Distributions if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
210*a1e26a70SApple OSS Distributions && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
211*a1e26a70SApple OSS Distributions error = EINVAL;
212*a1e26a70SApple OSS Distributions goto out;
213*a1e26a70SApple OSS Distributions }
214*a1e26a70SApple OSS Distributions
215*a1e26a70SApple OSS Distributions proc_t p = req->p;
216*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
217*a1e26a70SApple OSS Distributions
218*a1e26a70SApple OSS Distributions if (wq != NULL) {
219*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
220*a1e26a70SApple OSS Distributions if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
221*a1e26a70SApple OSS Distributions // Hackily enforce that the workqueue is still new (no requests or
222*a1e26a70SApple OSS Distributions // threads)
223*a1e26a70SApple OSS Distributions error = ENOTSUP;
224*a1e26a70SApple OSS Distributions } else {
225*a1e26a70SApple OSS Distributions wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
226*a1e26a70SApple OSS Distributions }
227*a1e26a70SApple OSS Distributions workq_unlock(wq);
228*a1e26a70SApple OSS Distributions } else {
229*a1e26a70SApple OSS Distributions /* This process has no workqueue, calling this syctl makes no sense */
230*a1e26a70SApple OSS Distributions return ENOTSUP;
231*a1e26a70SApple OSS Distributions }
232*a1e26a70SApple OSS Distributions
233*a1e26a70SApple OSS Distributions out:
234*a1e26a70SApple OSS Distributions return error;
235*a1e26a70SApple OSS Distributions }
236*a1e26a70SApple OSS Distributions
237*a1e26a70SApple OSS Distributions SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
238*a1e26a70SApple OSS Distributions CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
239*a1e26a70SApple OSS Distributions wq_limit_cooperative_threads_for_proc,
240*a1e26a70SApple OSS Distributions "I", "Modify the max pool size of the cooperative pool");
241*a1e26a70SApple OSS Distributions
242*a1e26a70SApple OSS Distributions #pragma mark p_wqptr
243*a1e26a70SApple OSS Distributions
244*a1e26a70SApple OSS Distributions #define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)
245*a1e26a70SApple OSS Distributions
246*a1e26a70SApple OSS Distributions static struct workqueue *
proc_get_wqptr_fast(struct proc * p)247*a1e26a70SApple OSS Distributions proc_get_wqptr_fast(struct proc *p)
248*a1e26a70SApple OSS Distributions {
249*a1e26a70SApple OSS Distributions return os_atomic_load(&p->p_wqptr, relaxed);
250*a1e26a70SApple OSS Distributions }
251*a1e26a70SApple OSS Distributions
252*a1e26a70SApple OSS Distributions struct workqueue *
proc_get_wqptr(struct proc * p)253*a1e26a70SApple OSS Distributions proc_get_wqptr(struct proc *p)
254*a1e26a70SApple OSS Distributions {
255*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
256*a1e26a70SApple OSS Distributions return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
257*a1e26a70SApple OSS Distributions }
258*a1e26a70SApple OSS Distributions
259*a1e26a70SApple OSS Distributions static void
proc_set_wqptr(struct proc * p,struct workqueue * wq)260*a1e26a70SApple OSS Distributions proc_set_wqptr(struct proc *p, struct workqueue *wq)
261*a1e26a70SApple OSS Distributions {
262*a1e26a70SApple OSS Distributions wq = os_atomic_xchg(&p->p_wqptr, wq, release);
263*a1e26a70SApple OSS Distributions if (wq == WQPTR_IS_INITING_VALUE) {
264*a1e26a70SApple OSS Distributions proc_lock(p);
265*a1e26a70SApple OSS Distributions thread_wakeup(&p->p_wqptr);
266*a1e26a70SApple OSS Distributions proc_unlock(p);
267*a1e26a70SApple OSS Distributions }
268*a1e26a70SApple OSS Distributions }
269*a1e26a70SApple OSS Distributions
270*a1e26a70SApple OSS Distributions static bool
proc_init_wqptr_or_wait(struct proc * p)271*a1e26a70SApple OSS Distributions proc_init_wqptr_or_wait(struct proc *p)
272*a1e26a70SApple OSS Distributions {
273*a1e26a70SApple OSS Distributions struct workqueue *wq;
274*a1e26a70SApple OSS Distributions
275*a1e26a70SApple OSS Distributions proc_lock(p);
276*a1e26a70SApple OSS Distributions wq = os_atomic_load(&p->p_wqptr, relaxed);
277*a1e26a70SApple OSS Distributions
278*a1e26a70SApple OSS Distributions if (wq == NULL) {
279*a1e26a70SApple OSS Distributions os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
280*a1e26a70SApple OSS Distributions proc_unlock(p);
281*a1e26a70SApple OSS Distributions return true;
282*a1e26a70SApple OSS Distributions }
283*a1e26a70SApple OSS Distributions
284*a1e26a70SApple OSS Distributions if (wq == WQPTR_IS_INITING_VALUE) {
285*a1e26a70SApple OSS Distributions assert_wait(&p->p_wqptr, THREAD_UNINT);
286*a1e26a70SApple OSS Distributions proc_unlock(p);
287*a1e26a70SApple OSS Distributions thread_block(THREAD_CONTINUE_NULL);
288*a1e26a70SApple OSS Distributions } else {
289*a1e26a70SApple OSS Distributions proc_unlock(p);
290*a1e26a70SApple OSS Distributions }
291*a1e26a70SApple OSS Distributions return false;
292*a1e26a70SApple OSS Distributions }
293*a1e26a70SApple OSS Distributions
294*a1e26a70SApple OSS Distributions static inline event_t
workq_parked_wait_event(struct uthread * uth)295*a1e26a70SApple OSS Distributions workq_parked_wait_event(struct uthread *uth)
296*a1e26a70SApple OSS Distributions {
297*a1e26a70SApple OSS Distributions return (event_t)&uth->uu_workq_stackaddr;
298*a1e26a70SApple OSS Distributions }
299*a1e26a70SApple OSS Distributions
300*a1e26a70SApple OSS Distributions static inline void
workq_thread_wakeup(struct uthread * uth)301*a1e26a70SApple OSS Distributions workq_thread_wakeup(struct uthread *uth)
302*a1e26a70SApple OSS Distributions {
303*a1e26a70SApple OSS Distributions thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
304*a1e26a70SApple OSS Distributions }
305*a1e26a70SApple OSS Distributions
306*a1e26a70SApple OSS Distributions #pragma mark wq_thactive
307*a1e26a70SApple OSS Distributions
308*a1e26a70SApple OSS Distributions #if defined(__LP64__)
309*a1e26a70SApple OSS Distributions // Layout is:
310*a1e26a70SApple OSS Distributions // 127 - 115 : 13 bits of zeroes
311*a1e26a70SApple OSS Distributions // 114 - 112 : best QoS among all pending constrained requests
312*a1e26a70SApple OSS Distributions // 111 - 0 : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
313*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_BUCKET_WIDTH 16
314*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_QOS_SHIFT (7 * WQ_THACTIVE_BUCKET_WIDTH)
315*a1e26a70SApple OSS Distributions #else
316*a1e26a70SApple OSS Distributions // Layout is:
317*a1e26a70SApple OSS Distributions // 63 - 61 : best QoS among all pending constrained requests
318*a1e26a70SApple OSS Distributions // 60 : Manager bucket (0 or 1)
319*a1e26a70SApple OSS Distributions // 59 - 0 : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
320*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_BUCKET_WIDTH 10
321*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_QOS_SHIFT (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
322*a1e26a70SApple OSS Distributions #endif
323*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_BUCKET_MASK ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
324*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_BUCKET_HALF (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))
325*a1e26a70SApple OSS Distributions
326*a1e26a70SApple OSS Distributions static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
327*a1e26a70SApple OSS Distributions "Make sure we have space to encode a QoS");
328*a1e26a70SApple OSS Distributions
329*a1e26a70SApple OSS Distributions static inline wq_thactive_t
_wq_thactive(struct workqueue * wq)330*a1e26a70SApple OSS Distributions _wq_thactive(struct workqueue *wq)
331*a1e26a70SApple OSS Distributions {
332*a1e26a70SApple OSS Distributions return os_atomic_load_wide(&wq->wq_thactive, relaxed);
333*a1e26a70SApple OSS Distributions }
334*a1e26a70SApple OSS Distributions
335*a1e26a70SApple OSS Distributions static inline uint8_t
_wq_bucket(thread_qos_t qos)336*a1e26a70SApple OSS Distributions _wq_bucket(thread_qos_t qos)
337*a1e26a70SApple OSS Distributions {
338*a1e26a70SApple OSS Distributions // Map both BG and MT to the same bucket by over-shifting down and
339*a1e26a70SApple OSS Distributions // clamping MT and BG together.
340*a1e26a70SApple OSS Distributions switch (qos) {
341*a1e26a70SApple OSS Distributions case THREAD_QOS_MAINTENANCE:
342*a1e26a70SApple OSS Distributions return 0;
343*a1e26a70SApple OSS Distributions default:
344*a1e26a70SApple OSS Distributions return qos - 2;
345*a1e26a70SApple OSS Distributions }
346*a1e26a70SApple OSS Distributions }
347*a1e26a70SApple OSS Distributions
348*a1e26a70SApple OSS Distributions #define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
349*a1e26a70SApple OSS Distributions ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))
350*a1e26a70SApple OSS Distributions
351*a1e26a70SApple OSS Distributions static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue * wq)352*a1e26a70SApple OSS Distributions _wq_thactive_best_constrained_req_qos(struct workqueue *wq)
353*a1e26a70SApple OSS Distributions {
354*a1e26a70SApple OSS Distributions // Avoid expensive atomic operations: the three bits we're loading are in
355*a1e26a70SApple OSS Distributions // a single byte, and always updated under the workqueue lock
356*a1e26a70SApple OSS Distributions wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
357*a1e26a70SApple OSS Distributions return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
358*a1e26a70SApple OSS Distributions }
359*a1e26a70SApple OSS Distributions
360*a1e26a70SApple OSS Distributions static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue * wq)361*a1e26a70SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
362*a1e26a70SApple OSS Distributions {
363*a1e26a70SApple OSS Distributions thread_qos_t old_qos, new_qos;
364*a1e26a70SApple OSS Distributions workq_threadreq_t req;
365*a1e26a70SApple OSS Distributions
366*a1e26a70SApple OSS Distributions req = priority_queue_max(&wq->wq_constrained_queue,
367*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
368*a1e26a70SApple OSS Distributions new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
369*a1e26a70SApple OSS Distributions old_qos = _wq_thactive_best_constrained_req_qos(wq);
370*a1e26a70SApple OSS Distributions if (old_qos != new_qos) {
371*a1e26a70SApple OSS Distributions long delta = (long)new_qos - (long)old_qos;
372*a1e26a70SApple OSS Distributions wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
373*a1e26a70SApple OSS Distributions /*
374*a1e26a70SApple OSS Distributions * We can do an atomic add relative to the initial load because updates
375*a1e26a70SApple OSS Distributions * to this qos are always serialized under the workqueue lock.
376*a1e26a70SApple OSS Distributions */
377*a1e26a70SApple OSS Distributions v = os_atomic_add(&wq->wq_thactive, v, relaxed);
378*a1e26a70SApple OSS Distributions #ifdef __LP64__
379*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
380*a1e26a70SApple OSS Distributions (uint64_t)(v >> 64), 0);
381*a1e26a70SApple OSS Distributions #else
382*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
383*a1e26a70SApple OSS Distributions #endif
384*a1e26a70SApple OSS Distributions }
385*a1e26a70SApple OSS Distributions }
386*a1e26a70SApple OSS Distributions
387*a1e26a70SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)388*a1e26a70SApple OSS Distributions _wq_thactive_offset_for_qos(thread_qos_t qos)
389*a1e26a70SApple OSS Distributions {
390*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
391*a1e26a70SApple OSS Distributions __builtin_assume(bucket < WORKQ_NUM_BUCKETS);
392*a1e26a70SApple OSS Distributions return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
393*a1e26a70SApple OSS Distributions }
394*a1e26a70SApple OSS Distributions
395*a1e26a70SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_inc(struct workqueue * wq,thread_qos_t qos)396*a1e26a70SApple OSS Distributions _wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
397*a1e26a70SApple OSS Distributions {
398*a1e26a70SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
399*a1e26a70SApple OSS Distributions return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
400*a1e26a70SApple OSS Distributions }
401*a1e26a70SApple OSS Distributions
402*a1e26a70SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_dec(struct workqueue * wq,thread_qos_t qos)403*a1e26a70SApple OSS Distributions _wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
404*a1e26a70SApple OSS Distributions {
405*a1e26a70SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
406*a1e26a70SApple OSS Distributions return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
407*a1e26a70SApple OSS Distributions }
408*a1e26a70SApple OSS Distributions
409*a1e26a70SApple OSS Distributions static inline void
_wq_thactive_move(struct workqueue * wq,thread_qos_t old_qos,thread_qos_t new_qos)410*a1e26a70SApple OSS Distributions _wq_thactive_move(struct workqueue *wq,
411*a1e26a70SApple OSS Distributions thread_qos_t old_qos, thread_qos_t new_qos)
412*a1e26a70SApple OSS Distributions {
413*a1e26a70SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
414*a1e26a70SApple OSS Distributions _wq_thactive_offset_for_qos(old_qos);
415*a1e26a70SApple OSS Distributions os_atomic_add(&wq->wq_thactive, v, relaxed);
416*a1e26a70SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
417*a1e26a70SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
418*a1e26a70SApple OSS Distributions }
419*a1e26a70SApple OSS Distributions
420*a1e26a70SApple OSS Distributions static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue * wq,wq_thactive_t v,thread_qos_t qos,uint32_t * busycount,uint32_t * max_busycount)421*a1e26a70SApple OSS Distributions _wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
422*a1e26a70SApple OSS Distributions thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
423*a1e26a70SApple OSS Distributions {
424*a1e26a70SApple OSS Distributions uint32_t count = 0, active;
425*a1e26a70SApple OSS Distributions uint64_t curtime;
426*a1e26a70SApple OSS Distributions
427*a1e26a70SApple OSS Distributions assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);
428*a1e26a70SApple OSS Distributions
429*a1e26a70SApple OSS Distributions if (busycount) {
430*a1e26a70SApple OSS Distributions curtime = mach_absolute_time();
431*a1e26a70SApple OSS Distributions *busycount = 0;
432*a1e26a70SApple OSS Distributions }
433*a1e26a70SApple OSS Distributions if (max_busycount) {
434*a1e26a70SApple OSS Distributions *max_busycount = THREAD_QOS_LAST - qos;
435*a1e26a70SApple OSS Distributions }
436*a1e26a70SApple OSS Distributions
437*a1e26a70SApple OSS Distributions uint8_t i = _wq_bucket(qos);
438*a1e26a70SApple OSS Distributions v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
439*a1e26a70SApple OSS Distributions for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
440*a1e26a70SApple OSS Distributions active = v & WQ_THACTIVE_BUCKET_MASK;
441*a1e26a70SApple OSS Distributions count += active;
442*a1e26a70SApple OSS Distributions
443*a1e26a70SApple OSS Distributions if (busycount && wq->wq_thscheduled_count[i] > active) {
444*a1e26a70SApple OSS Distributions if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
445*a1e26a70SApple OSS Distributions /*
446*a1e26a70SApple OSS Distributions * We only consider the last blocked thread for a given bucket
447*a1e26a70SApple OSS Distributions * as busy because we don't want to take the list lock in each
448*a1e26a70SApple OSS Distributions * sched callback. However this is an approximation that could
449*a1e26a70SApple OSS Distributions * contribute to thread creation storms.
450*a1e26a70SApple OSS Distributions */
451*a1e26a70SApple OSS Distributions (*busycount)++;
452*a1e26a70SApple OSS Distributions }
453*a1e26a70SApple OSS Distributions }
454*a1e26a70SApple OSS Distributions }
455*a1e26a70SApple OSS Distributions
456*a1e26a70SApple OSS Distributions return count;
457*a1e26a70SApple OSS Distributions }
458*a1e26a70SApple OSS Distributions
459*a1e26a70SApple OSS Distributions /* The input qos here should be the requested QoS of the thread, not accounting
460*a1e26a70SApple OSS Distributions * for any overrides */
461*a1e26a70SApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue * wq,thread_qos_t qos)462*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
463*a1e26a70SApple OSS Distributions {
464*a1e26a70SApple OSS Distributions __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
465*a1e26a70SApple OSS Distributions assert(old_scheduled_count > 0);
466*a1e26a70SApple OSS Distributions }
467*a1e26a70SApple OSS Distributions
468*a1e26a70SApple OSS Distributions /* The input qos here should be the requested QoS of the thread, not accounting
469*a1e26a70SApple OSS Distributions * for any overrides */
470*a1e26a70SApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue * wq,thread_qos_t qos)471*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
472*a1e26a70SApple OSS Distributions {
473*a1e26a70SApple OSS Distributions __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
474*a1e26a70SApple OSS Distributions assert(old_scheduled_count < UINT8_MAX);
475*a1e26a70SApple OSS Distributions }
476*a1e26a70SApple OSS Distributions
477*a1e26a70SApple OSS Distributions #pragma mark wq_flags
478*a1e26a70SApple OSS Distributions
479*a1e26a70SApple OSS Distributions static inline uint32_t
_wq_flags(struct workqueue * wq)480*a1e26a70SApple OSS Distributions _wq_flags(struct workqueue *wq)
481*a1e26a70SApple OSS Distributions {
482*a1e26a70SApple OSS Distributions return os_atomic_load(&wq->wq_flags, relaxed);
483*a1e26a70SApple OSS Distributions }
484*a1e26a70SApple OSS Distributions
485*a1e26a70SApple OSS Distributions static inline bool
_wq_exiting(struct workqueue * wq)486*a1e26a70SApple OSS Distributions _wq_exiting(struct workqueue *wq)
487*a1e26a70SApple OSS Distributions {
488*a1e26a70SApple OSS Distributions return _wq_flags(wq) & WQ_EXITING;
489*a1e26a70SApple OSS Distributions }
490*a1e26a70SApple OSS Distributions
491*a1e26a70SApple OSS Distributions bool
workq_is_exiting(struct proc * p)492*a1e26a70SApple OSS Distributions workq_is_exiting(struct proc *p)
493*a1e26a70SApple OSS Distributions {
494*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
495*a1e26a70SApple OSS Distributions return !wq || _wq_exiting(wq);
496*a1e26a70SApple OSS Distributions }
497*a1e26a70SApple OSS Distributions
498*a1e26a70SApple OSS Distributions
499*a1e26a70SApple OSS Distributions #pragma mark workqueue lock
500*a1e26a70SApple OSS Distributions
/* Debugger-context (kdp) check: is the workqueue ticket lock held? */
static bool
workq_lock_is_acquired_kdp(struct workqueue *wq)
{
	return kdp_lck_ticket_is_acquired(&wq->wq_lock);
}
506*a1e26a70SApple OSS Distributions
/* Acquire the per-workqueue ticket lock (spins until granted). */
static inline void
workq_lock_spin(struct workqueue *wq)
{
	lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
}
512*a1e26a70SApple OSS Distributions
/* Assert (debug builds) that the caller owns the workqueue lock. */
static inline void
workq_lock_held(struct workqueue *wq)
{
	LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
}
518*a1e26a70SApple OSS Distributions
/* Try to acquire the workqueue lock without blocking; true on success. */
static inline bool
workq_lock_try(struct workqueue *wq)
{
	return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
}
524*a1e26a70SApple OSS Distributions
/* Release the workqueue lock taken by workq_lock_spin()/workq_lock_try(). */
static inline void
workq_unlock(struct workqueue *wq)
{
	lck_ticket_unlock(&wq->wq_lock);
}
530*a1e26a70SApple OSS Distributions
531*a1e26a70SApple OSS Distributions #pragma mark idle thread lists
532*a1e26a70SApple OSS Distributions
/* Fresh thread policy: requested QoS and accounting bucket both start at `qos`. */
#define WORKQ_POLICY_INIT(qos) \
        (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
535*a1e26a70SApple OSS Distributions
536*a1e26a70SApple OSS Distributions static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)537*a1e26a70SApple OSS Distributions workq_pri_bucket(struct uu_workq_policy req)
538*a1e26a70SApple OSS Distributions {
539*a1e26a70SApple OSS Distributions return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
540*a1e26a70SApple OSS Distributions }
541*a1e26a70SApple OSS Distributions
542*a1e26a70SApple OSS Distributions static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)543*a1e26a70SApple OSS Distributions workq_pri_override(struct uu_workq_policy req)
544*a1e26a70SApple OSS Distributions {
545*a1e26a70SApple OSS Distributions return MAX(workq_pri_bucket(req), req.qos_bucket);
546*a1e26a70SApple OSS Distributions }
547*a1e26a70SApple OSS Distributions
548*a1e26a70SApple OSS Distributions static inline bool
workq_thread_needs_params_change(workq_threadreq_t req,struct uthread * uth)549*a1e26a70SApple OSS Distributions workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
550*a1e26a70SApple OSS Distributions {
551*a1e26a70SApple OSS Distributions workq_threadreq_param_t cur_trp, req_trp = { };
552*a1e26a70SApple OSS Distributions
553*a1e26a70SApple OSS Distributions cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
554*a1e26a70SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
555*a1e26a70SApple OSS Distributions req_trp = kqueue_threadreq_workloop_param(req);
556*a1e26a70SApple OSS Distributions }
557*a1e26a70SApple OSS Distributions
558*a1e26a70SApple OSS Distributions /*
559*a1e26a70SApple OSS Distributions * CPU percent flags are handled separately to policy changes, so ignore
560*a1e26a70SApple OSS Distributions * them for all of these checks.
561*a1e26a70SApple OSS Distributions */
562*a1e26a70SApple OSS Distributions uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
563*a1e26a70SApple OSS Distributions uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);
564*a1e26a70SApple OSS Distributions
565*a1e26a70SApple OSS Distributions if (!req_flags && !cur_flags) {
566*a1e26a70SApple OSS Distributions return false;
567*a1e26a70SApple OSS Distributions }
568*a1e26a70SApple OSS Distributions
569*a1e26a70SApple OSS Distributions if (req_flags != cur_flags) {
570*a1e26a70SApple OSS Distributions return true;
571*a1e26a70SApple OSS Distributions }
572*a1e26a70SApple OSS Distributions
573*a1e26a70SApple OSS Distributions if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
574*a1e26a70SApple OSS Distributions return true;
575*a1e26a70SApple OSS Distributions }
576*a1e26a70SApple OSS Distributions
577*a1e26a70SApple OSS Distributions if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
578*a1e26a70SApple OSS Distributions return true;
579*a1e26a70SApple OSS Distributions }
580*a1e26a70SApple OSS Distributions
581*a1e26a70SApple OSS Distributions return false;
582*a1e26a70SApple OSS Distributions }
583*a1e26a70SApple OSS Distributions
/*
 * Decide whether binding `uth` to `req` requires re-applying scheduling
 * attributes: workloop params differ, the request QoS doesn't match the
 * thread's current effective override, or a preadopted thread group may
 * need to be (re)set.
 */
static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
{
	if (workq_thread_needs_params_change(req, uth)) {
		return true;
	}

	if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
		return true;
	}

#if CONFIG_PREADOPT_TG
	thread_group_qos_t tg = kqr_preadopt_thread_group(req);
	if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
		/*
		 * Ideally, we'd add check here to see if thread's preadopt TG is same
		 * as the thread requests's thread group and short circuit if that is
		 * the case. But in the interest of keeping the code clean and not
		 * taking the thread lock here, we're going to skip this. We will
		 * eventually shortcircuit once we try to set the preadoption thread
		 * group on the thread.
		 */
		return true;
	}
#endif

	return false;
}
612*a1e26a70SApple OSS Distributions
613*a1e26a70SApple OSS Distributions /* Input thread must be self. Called during self override, resetting overrides
614*a1e26a70SApple OSS Distributions * or while processing kevents
615*a1e26a70SApple OSS Distributions *
616*a1e26a70SApple OSS Distributions * Called with workq lock held. Sometimes also the thread mutex
617*a1e26a70SApple OSS Distributions */
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
    struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
    bool force_run)
{
	assert(uth == current_uthread());

	thread_qos_t old_bucket = old_pri.qos_bucket;
	thread_qos_t new_bucket = workq_pri_bucket(new_pri);

	/* Move this thread's active accounting between QoS buckets if needed. */
	if ((old_bucket != new_bucket) &&
	    !workq_thread_is_permanently_bound(uth)) {
		_wq_thactive_move(wq, old_bucket, new_bucket);
	}

	new_pri.qos_bucket = new_bucket;
	uth->uu_workq_pri = new_pri;

	/* Propagate a changed override QoS to the Mach thread. */
	if (old_pri.qos_override != new_pri.qos_override) {
		thread_set_workq_override(get_machthread(uth), new_pri.qos_override);
	}

	/*
	 * Re-evaluate thread creation when lowering our bucket (we may have
	 * unblocked a pending request) or when the caller forces it.
	 */
	if (wq->wq_reqcount &&
	    !workq_thread_is_permanently_bound(uth) &&
	    (old_bucket > new_bucket || force_run)) {
		int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
		if (old_bucket > new_bucket) {
			/*
			 * When lowering our bucket, we may unblock a thread request,
			 * but we can't drop our priority before we have evaluated
			 * whether this is the case, and if we ever drop the workqueue lock
			 * that would cause a priority inversion.
			 *
			 * We hence have to disallow thread creation in that case.
			 */
			flags = 0;
		}
		workq_schedule_creator(p, wq, flags);
	}
}
658*a1e26a70SApple OSS Distributions
659*a1e26a70SApple OSS Distributions /*
660*a1e26a70SApple OSS Distributions * Sets/resets the cpu percent limits on the current thread. We can't set
661*a1e26a70SApple OSS Distributions * these limits from outside of the current thread, so this function needs
662*a1e26a70SApple OSS Distributions * to be called when we're executing on the intended
663*a1e26a70SApple OSS Distributions */
static void
workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
{
	assert(uth == current_uthread());
	workq_threadreq_param_t trp = { };

	/* Only workloop requests with params can carry a CPU percent limit. */
	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
		/*
		 * Going through disable when we have an existing CPU percent limit
		 * set will force the ledger to refill the token bucket of the current
		 * thread. Removing any penalty applied by previous thread use.
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
		uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
	}

	if (trp.trp_flags & TRP_CPUPERCENT) {
		/*
		 * NOTE(review): trp_refillms is named as if in milliseconds but is
		 * scaled by NSEC_PER_SEC here — verify the intended units of the
		 * refill interval (NSEC_PER_MSEC would match the field's name).
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
		    (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
		uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
	}
}
690*a1e26a70SApple OSS Distributions
691*a1e26a70SApple OSS Distributions /*
692*a1e26a70SApple OSS Distributions * This function is always called with the workq lock, except for the
693*a1e26a70SApple OSS Distributions * permanently bound workqueue thread, which instead requires the kqlock.
694*a1e26a70SApple OSS Distributions * See locking model for bound thread's uu_workq_flags.
695*a1e26a70SApple OSS Distributions */
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req, bool unpark)
{
	thread_t th = get_machthread(uth);
	/* No request means the thread is going back to park: use the cleanup QoS. */
	thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
	workq_threadreq_param_t trp = { };
	int priority = 31;
	int policy = POLICY_TIMESHARE;

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	/* Reset the thread's workq policy to track the request's QoS. */
	uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
	uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;

	if (unpark) {
		/* Stash the params so later rebinds can detect changes cheaply. */
		uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
		// qos sent out to userspace (may differ from uu_workq_pri on param threads)
		uth->uu_save.uus_workq_park_data.qos = qos;
	}

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;
		assert(trp.trp_value == 0); // manager qos and thread policy don't mix

		if (_pthread_priority_has_sched_pri(mgr_pri)) {
			/* Manager priority encodes a raw scheduler priority: apply it directly. */
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
			thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
			    POLICY_TIMESHARE);
			return;
		}

		qos = _pthread_priority_thread_qos(mgr_pri);
	} else {
		if (trp.trp_flags & TRP_PRIORITY) {
			/* Explicit priority params take the thread outside the QoS world. */
			qos = THREAD_QOS_UNSPECIFIED;
			priority = trp.trp_pri;
			uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
		}

		if (trp.trp_flags & TRP_POLICY) {
			policy = trp.trp_pol;
		}
	}

#if CONFIG_PREADOPT_TG
	if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
		/*
		 * For kqwl permanently configured with a thread group, we can safely borrow
		 * +1 ref from kqwl_preadopt_tg. A thread then takes additional +1 ref
		 * for itself via thread_set_preadopt_thread_group.
		 *
		 * In all other cases, we cannot safely read and borrow the reference from the kqwl
		 * since it can disappear from under us at any time due to the max-ing logic in
		 * kqueue_set_preadopted_thread_group.
		 *
		 * As such, we do the following dance:
		 *
		 * 1) cmpxchng and steal the kqwl's preadopt thread group and leave
		 * behind with (NULL + QoS). At this point, we have the reference
		 * to the thread group from the kqwl.
		 * 2) Have the thread set the preadoption thread group on itself.
		 * 3) cmpxchng from (NULL + QoS) which we set earlier in (1), back to
		 * thread_group + QoS. ie we try to give the reference back to the kqwl.
		 * If we fail, that's because a higher QoS thread group was set on the
		 * kqwl in kqueue_set_preadopted_thread_group in which case, we need to
		 * go back to (1).
		 */

		_Atomic(struct thread_group *) * tg_loc = kqr_preadopt_thread_group_addr(req);

		thread_group_qos_t old_tg, new_tg;
		int ret = 0;
again:
		/* Step (1): try to steal the preadopt TG, leaving only the QoS bits. */
		ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
			if ((!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) ||
			    KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
				os_atomic_rmw_loop_give_up(break);
			}

			/*
			 * Leave the QoS behind - kqueue_set_preadopted_thread_group will
			 * only modify it if there is a higher QoS thread group to attach
			 */
			new_tg = (thread_group_qos_t) ((uintptr_t) old_tg & KQWL_PREADOPT_TG_QOS_MASK);
		});

		if (ret) {
			/*
			 * We successfully took the ref from the kqwl so set it on the
			 * thread now
			 */
			thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));

			thread_group_qos_t thread_group_to_expect = new_tg;
			thread_group_qos_t thread_group_to_set = old_tg;

			/* Step (3): hand the reference back to the kqwl if nothing changed. */
			os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
				if (old_tg != thread_group_to_expect) {
					/*
					 * There was an intervening write to the kqwl_preadopt_tg,
					 * and it has a higher QoS than what we are working with
					 * here. Abandon our current adopted thread group and redo
					 * the full dance
					 */
					thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
					os_atomic_rmw_loop_give_up(goto again);
				}

				new_tg = thread_group_to_set;
			});
		} else {
			if (KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
				/* Permanent TG: safe to borrow the kqwl's reference directly. */
				thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));
			} else {
				/* Nothing valid on the kqwl, just clear what's on the thread */
				thread_set_preadopt_thread_group(th, NULL);
			}
		}
	} else {
		/* Not even a kqwl, clear what's on the thread */
		thread_set_preadopt_thread_group(th, NULL);
	}
#endif
	thread_set_workq_pri(th, qos, priority, policy);
}
824*a1e26a70SApple OSS Distributions
825*a1e26a70SApple OSS Distributions /*
826*a1e26a70SApple OSS Distributions * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
827*a1e26a70SApple OSS Distributions * every time a servicer is being told about a new max QoS.
828*a1e26a70SApple OSS Distributions */
829*a1e26a70SApple OSS Distributions void
workq_thread_set_max_qos(struct proc * p,workq_threadreq_t kqr)830*a1e26a70SApple OSS Distributions workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
831*a1e26a70SApple OSS Distributions {
832*a1e26a70SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
833*a1e26a70SApple OSS Distributions struct uthread *uth = current_uthread();
834*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
835*a1e26a70SApple OSS Distributions thread_qos_t qos = kqr->tr_kq_qos_index;
836*a1e26a70SApple OSS Distributions
837*a1e26a70SApple OSS Distributions if (uth->uu_workq_pri.qos_max == qos) {
838*a1e26a70SApple OSS Distributions return;
839*a1e26a70SApple OSS Distributions }
840*a1e26a70SApple OSS Distributions
841*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
842*a1e26a70SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
843*a1e26a70SApple OSS Distributions new_pri.qos_max = qos;
844*a1e26a70SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
845*a1e26a70SApple OSS Distributions workq_unlock(wq);
846*a1e26a70SApple OSS Distributions }
847*a1e26a70SApple OSS Distributions
848*a1e26a70SApple OSS Distributions #pragma mark idle threads accounting and handling
849*a1e26a70SApple OSS Distributions
850*a1e26a70SApple OSS Distributions static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue * wq)851*a1e26a70SApple OSS Distributions workq_oldest_killable_idle_thread(struct workqueue *wq)
852*a1e26a70SApple OSS Distributions {
853*a1e26a70SApple OSS Distributions struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
854*a1e26a70SApple OSS Distributions
855*a1e26a70SApple OSS Distributions if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
856*a1e26a70SApple OSS Distributions uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
857*a1e26a70SApple OSS Distributions if (uth) {
858*a1e26a70SApple OSS Distributions assert(uth->uu_save.uus_workq_park_data.has_stack);
859*a1e26a70SApple OSS Distributions }
860*a1e26a70SApple OSS Distributions }
861*a1e26a70SApple OSS Distributions return uth;
862*a1e26a70SApple OSS Distributions }
863*a1e26a70SApple OSS Distributions
864*a1e26a70SApple OSS Distributions static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue * wq)865*a1e26a70SApple OSS Distributions workq_kill_delay_for_idle_thread(struct workqueue *wq)
866*a1e26a70SApple OSS Distributions {
867*a1e26a70SApple OSS Distributions uint64_t delay = wq_reduce_pool_window.abstime;
868*a1e26a70SApple OSS Distributions uint16_t idle = wq->wq_thidlecount;
869*a1e26a70SApple OSS Distributions
870*a1e26a70SApple OSS Distributions /*
871*a1e26a70SApple OSS Distributions * If we have less than wq_death_max_load threads, have a 5s timer.
872*a1e26a70SApple OSS Distributions *
873*a1e26a70SApple OSS Distributions * For the next wq_max_constrained_threads ones, decay linearly from
874*a1e26a70SApple OSS Distributions * from 5s to 50ms.
875*a1e26a70SApple OSS Distributions */
876*a1e26a70SApple OSS Distributions if (idle <= wq_death_max_load) {
877*a1e26a70SApple OSS Distributions return delay;
878*a1e26a70SApple OSS Distributions }
879*a1e26a70SApple OSS Distributions
880*a1e26a70SApple OSS Distributions if (wq_max_constrained_threads > idle - wq_death_max_load) {
881*a1e26a70SApple OSS Distributions delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
882*a1e26a70SApple OSS Distributions }
883*a1e26a70SApple OSS Distributions return delay / wq_max_constrained_threads;
884*a1e26a70SApple OSS Distributions }
885*a1e26a70SApple OSS Distributions
886*a1e26a70SApple OSS Distributions static inline bool
workq_should_kill_idle_thread(struct workqueue * wq,struct uthread * uth,uint64_t now)887*a1e26a70SApple OSS Distributions workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
888*a1e26a70SApple OSS Distributions uint64_t now)
889*a1e26a70SApple OSS Distributions {
890*a1e26a70SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
891*a1e26a70SApple OSS Distributions return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
892*a1e26a70SApple OSS Distributions }
893*a1e26a70SApple OSS Distributions
/*
 * Arm the idle-thread death timer to fire at `deadline`, unless the
 * workqueue is exiting or a death call is already pending.
 */
static void
workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
{
	uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);

	if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
		return;
	}
	os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);

	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);

	/*
	 * <rdar://problem/13139182> Due to how long term timers work, the leeway
	 * can't be too short, so use 500ms which is long enough that we will not
	 * wake up the CPU for killing threads, but short enough that it doesn't
	 * fall into long-term timer list shenanigans.
	 */
	thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
	    wq_reduce_pool_window.abstime / 10,
	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
916*a1e26a70SApple OSS Distributions
917*a1e26a70SApple OSS Distributions /*
918*a1e26a70SApple OSS Distributions * `decrement` is set to the number of threads that are no longer dying:
919*a1e26a70SApple OSS Distributions * - because they have been resuscitated just in time (workq_pop_idle_thread)
920*a1e26a70SApple OSS Distributions * - or have been killed (workq_thread_terminate).
921*a1e26a70SApple OSS Distributions */
static void
workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
{
	struct uthread *uth;

	/* Retire `decrement` in-flight deaths; only act once none remain. */
	assert(wq->wq_thdying_count >= decrement);
	if ((wq->wq_thdying_count -= decrement) > 0) {
		return;
	}

	/* Keep at least one idle thread around. */
	if (wq->wq_thidlecount <= 1) {
		return;
	}

	if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
		return;
	}

	uint64_t now = mach_absolute_time();
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);

	if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
		/* The oldest killable thread has idled past the delay: kill it. */
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, wq->wq_thidlecount, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		/* A thread in idle cleanup will notice DYING on its own. */
		if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
			workq_thread_wakeup(uth);
		}
		return;
	}

	/* Not old enough yet: re-arm the death call for when it will be. */
	workq_death_call_schedule(wq,
	    uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}
957*a1e26a70SApple OSS Distributions
/*
 * Tears down the workqueue bookkeeping for a thread that is exiting:
 * removes it from the run list, advances the death policy if this death
 * was requested, and redrives thread creation if this exit brought the
 * workqueue back under the thread limit.
 */
void
workq_thread_terminate(struct proc *p, struct uthread *uth)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
	if (!workq_thread_is_permanently_bound(uth)) {
		TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
		if (uth->uu_workq_flags & UT_WORKQ_DYING) {
			/* A requested death completed: let the policy pick its next victim. */
			WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
			    wq, wq->wq_thidlecount, 0, 0);
			workq_death_policy_evaluate(wq, 1);
		}
	}
	/* Note: compares the pre-decrement value, i.e. we were exactly at the limit. */
	if (wq->wq_nthreads-- == wq_max_threads) {
		/*
		 * We got under the thread limit again, which may have prevented
		 * thread creation from happening, redrive if there are pending requests
		 */
		if (wq->wq_reqcount) {
			workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
		}
	}
	workq_unlock(wq);

	/* Drop a thread reference; must be done outside the workq lock. */
	thread_deallocate(get_machthread(uth));
}
985*a1e26a70SApple OSS Distributions
/*
 * Thread-call handler for the workqueue "death call": fires once the kill
 * delay for the oldest killable idle thread has elapsed, and re-evaluates
 * the death policy (which may kill a thread or re-arm this call).
 */
static void
workq_kill_old_threads_call(void *param0, void *param1 __unused)
{
	struct workqueue *wq = param0;

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
	/* Clear the flag first so the policy evaluation may re-arm the call. */
	os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
	workq_death_policy_evaluate(wq, 0);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
	workq_unlock(wq);
}
998*a1e26a70SApple OSS Distributions
999*a1e26a70SApple OSS Distributions static struct uthread *
workq_pop_idle_thread(struct workqueue * wq,uint16_t uu_flags,bool * needs_wakeup)1000*a1e26a70SApple OSS Distributions workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
1001*a1e26a70SApple OSS Distributions bool *needs_wakeup)
1002*a1e26a70SApple OSS Distributions {
1003*a1e26a70SApple OSS Distributions struct uthread *uth;
1004*a1e26a70SApple OSS Distributions
1005*a1e26a70SApple OSS Distributions if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
1006*a1e26a70SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
1007*a1e26a70SApple OSS Distributions } else {
1008*a1e26a70SApple OSS Distributions uth = TAILQ_FIRST(&wq->wq_thnewlist);
1009*a1e26a70SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
1010*a1e26a70SApple OSS Distributions }
1011*a1e26a70SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
1012*a1e26a70SApple OSS Distributions
1013*a1e26a70SApple OSS Distributions assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
1014*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;
1015*a1e26a70SApple OSS Distributions
1016*a1e26a70SApple OSS Distributions /* A thread is never woken up as part of the cooperative pool */
1017*a1e26a70SApple OSS Distributions assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);
1018*a1e26a70SApple OSS Distributions
1019*a1e26a70SApple OSS Distributions if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
1020*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
1021*a1e26a70SApple OSS Distributions }
1022*a1e26a70SApple OSS Distributions wq->wq_threads_scheduled++;
1023*a1e26a70SApple OSS Distributions wq->wq_thidlecount--;
1024*a1e26a70SApple OSS Distributions
1025*a1e26a70SApple OSS Distributions if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
1026*a1e26a70SApple OSS Distributions uth->uu_workq_flags ^= UT_WORKQ_DYING;
1027*a1e26a70SApple OSS Distributions workq_death_policy_evaluate(wq, 1);
1028*a1e26a70SApple OSS Distributions *needs_wakeup = false;
1029*a1e26a70SApple OSS Distributions } else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
1030*a1e26a70SApple OSS Distributions *needs_wakeup = false;
1031*a1e26a70SApple OSS Distributions } else {
1032*a1e26a70SApple OSS Distributions *needs_wakeup = true;
1033*a1e26a70SApple OSS Distributions }
1034*a1e26a70SApple OSS Distributions return uth;
1035*a1e26a70SApple OSS Distributions }
1036*a1e26a70SApple OSS Distributions
1037*a1e26a70SApple OSS Distributions /*
1038*a1e26a70SApple OSS Distributions * Called by thread_create_workq_waiting() during thread initialization, before
1039*a1e26a70SApple OSS Distributions * assert_wait, before the thread has been started.
1040*a1e26a70SApple OSS Distributions */
event_t
workq_thread_init_and_wq_lock(task_t task, thread_t th)
{
	struct uthread *uth = get_bsdthread_info(th);

	/* Fresh workqueue threads start "new", at the default (legacy) QoS,
	 * with no port, stack, or pthread-kill permission yet. */
	uth->uu_workq_flags = UT_WORKQ_NEW;
	uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
	uth->uu_workq_thport = MACH_PORT_NULL;
	uth->uu_workq_stackaddr = 0;
	uth->uu_workq_pthread_kill_allowed = 0;

	thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
	thread_reset_workq_qos(th, THREAD_QOS_LEGACY);

	/* Intentionally returns with the workq lock held (see caller contract
	 * in thread_create_workq_waiting). Returns the park wait event. */
	workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
	return workq_parked_wait_event(uth);
}
1058*a1e26a70SApple OSS Distributions
1059*a1e26a70SApple OSS Distributions /**
1060*a1e26a70SApple OSS Distributions * Try to add a new workqueue thread.
1061*a1e26a70SApple OSS Distributions *
1062*a1e26a70SApple OSS Distributions * - called with workq lock held
1063*a1e26a70SApple OSS Distributions * - dropped and retaken around thread creation
1064*a1e26a70SApple OSS Distributions * - return with workq lock held
1065*a1e26a70SApple OSS Distributions */
static kern_return_t
workq_add_new_idle_thread(
	proc_t p,
	struct workqueue *wq,
	thread_continue_t continuation,
	bool is_permanently_bound,
	thread_t *new_thread)
{
	mach_vm_offset_t th_stackaddr;
	kern_return_t kret;
	thread_t th;

	/* Reserve the slot before dropping the lock so concurrent callers
	 * account for this thread-in-creation. */
	wq->wq_nthreads++;

	workq_unlock(wq);

	vm_map_t vmap = get_task_map(proc_task(p));

	kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 1, 0);
		goto out;
	}

	kret = thread_create_workq_waiting(proc_task(p),
	    continuation,
	    &th,
	    is_permanently_bound);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 0, 0);
		/* Undo the stack allocation made above. */
		pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
		goto out;
	}

	// thread_create_workq_waiting() will return with the wq lock held
	// on success, because it calls workq_thread_init_and_wq_lock() above

	struct uthread *uth = get_bsdthread_info(th);
	uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;

	wq->wq_creations++;
	/* Permanently bound threads never sit on the idle lists. */
	if (!is_permanently_bound) {
		wq->wq_thidlecount++;
		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
	}

	if (new_thread) {
		*new_thread = th;
	}

	WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
	return kret;

out:
	workq_lock_spin(wq);
	/*
	 * Do not redrive here if we went under wq_max_threads again,
	 * it is the responsibility of the callers of this function
	 * to do so when it fails.
	 */
	wq->wq_nthreads--;
	return kret;
}
1131*a1e26a70SApple OSS Distributions
1132*a1e26a70SApple OSS Distributions static inline bool
workq_thread_is_overcommit(struct uthread * uth)1133*a1e26a70SApple OSS Distributions workq_thread_is_overcommit(struct uthread *uth)
1134*a1e26a70SApple OSS Distributions {
1135*a1e26a70SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
1136*a1e26a70SApple OSS Distributions }
1137*a1e26a70SApple OSS Distributions
1138*a1e26a70SApple OSS Distributions static inline bool
workq_thread_is_nonovercommit(struct uthread * uth)1139*a1e26a70SApple OSS Distributions workq_thread_is_nonovercommit(struct uthread *uth)
1140*a1e26a70SApple OSS Distributions {
1141*a1e26a70SApple OSS Distributions return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT |
1142*a1e26a70SApple OSS Distributions UT_WORKQ_COOPERATIVE)) == 0;
1143*a1e26a70SApple OSS Distributions }
1144*a1e26a70SApple OSS Distributions
1145*a1e26a70SApple OSS Distributions static inline bool
workq_thread_is_cooperative(struct uthread * uth)1146*a1e26a70SApple OSS Distributions workq_thread_is_cooperative(struct uthread *uth)
1147*a1e26a70SApple OSS Distributions {
1148*a1e26a70SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
1149*a1e26a70SApple OSS Distributions }
1150*a1e26a70SApple OSS Distributions
1151*a1e26a70SApple OSS Distributions bool
workq_thread_is_permanently_bound(struct uthread * uth)1152*a1e26a70SApple OSS Distributions workq_thread_is_permanently_bound(struct uthread *uth)
1153*a1e26a70SApple OSS Distributions {
1154*a1e26a70SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_PERMANENT_BIND) != 0;
1155*a1e26a70SApple OSS Distributions }
1156*a1e26a70SApple OSS Distributions
1157*a1e26a70SApple OSS Distributions static inline void
workq_thread_set_type(struct uthread * uth,uint16_t flags)1158*a1e26a70SApple OSS Distributions workq_thread_set_type(struct uthread *uth, uint16_t flags)
1159*a1e26a70SApple OSS Distributions {
1160*a1e26a70SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1161*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= flags;
1162*a1e26a70SApple OSS Distributions }
1163*a1e26a70SApple OSS Distributions
1164*a1e26a70SApple OSS Distributions
1165*a1e26a70SApple OSS Distributions #define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1
1166*a1e26a70SApple OSS Distributions
/*
 * Unparks a thread for the sole purpose of exiting: caps its priority at
 * the cleanup QoS, pulls it off the idle lists if it was idle, and hands
 * it back to userspace with WQ_SETUP_EXIT_THREAD. Never returns.
 * Called with the workq lock held; drops it.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
	bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;

	/* No reason to run the exit path above the cleanup QoS. */
	if (qos > WORKQ_THREAD_QOS_CLEANUP) {
		workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
		qos = WORKQ_THREAD_QOS_CLEANUP;
	}

	workq_thread_reset_cpupercent(NULL, uth);

	if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
		wq->wq_thidlecount--;
		/* New threads live on wq_thnewlist, parked ones on wq_thidlelist. */
		if (first_use) {
			TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
		} else {
			TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
		}
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	workq_unlock(wq);

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (!first_use) {
		flags |= WQ_FLAG_THREAD_REUSE;
	}

	/* Hands the thread to the pthread shims' exit path; does not return. */
	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
	__builtin_unreachable();
}
1212*a1e26a70SApple OSS Distributions
1213*a1e26a70SApple OSS Distributions bool
workq_is_current_thread_updating_turnstile(struct workqueue * wq)1214*a1e26a70SApple OSS Distributions workq_is_current_thread_updating_turnstile(struct workqueue *wq)
1215*a1e26a70SApple OSS Distributions {
1216*a1e26a70SApple OSS Distributions return wq->wq_turnstile_updater == current_thread();
1217*a1e26a70SApple OSS Distributions }
1218*a1e26a70SApple OSS Distributions
/*
 * Runs `operation` (a turnstile manipulation block) with
 * wq_turnstile_updater set to the calling thread, so that
 * workq_is_current_thread_updating_turnstile() can identify it.
 * Caller must hold the workqueue lock.
 */
__attribute__((always_inline))
static inline void
workq_perform_turnstile_operation_locked(struct workqueue *wq,
    void (^operation)(void))
{
	workq_lock_held(wq);
	wq->wq_turnstile_updater = current_thread();
	operation();
	wq->wq_turnstile_updater = THREAD_NULL;
}
1229*a1e26a70SApple OSS Distributions
/*
 * Points the workqueue's turnstile at a new inheritor (a thread, the
 * workqueue itself, or NULL), doing nothing if it is unchanged.
 * Caller must hold the workqueue lock.
 */
static void
workq_turnstile_update_inheritor(struct workqueue *wq,
    turnstile_inheritor_t inheritor,
    turnstile_update_flags_t flags)
{
	if (wq->wq_inheritor == inheritor) {
		/* Already the inheritor: avoid the turnstile round-trip. */
		return;
	}
	wq->wq_inheritor = inheritor;
	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wq->wq_turnstile, inheritor,
		flags | TURNSTILE_IMMEDIATE_UPDATE);
		turnstile_update_inheritor_complete(wq->wq_turnstile,
		TURNSTILE_INTERLOCK_HELD);
	});
}
1246*a1e26a70SApple OSS Distributions
1247*a1e26a70SApple OSS Distributions static void
workq_push_idle_thread(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)1248*a1e26a70SApple OSS Distributions workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
1249*a1e26a70SApple OSS Distributions uint32_t setup_flags)
1250*a1e26a70SApple OSS Distributions {
1251*a1e26a70SApple OSS Distributions uint64_t now = mach_absolute_time();
1252*a1e26a70SApple OSS Distributions bool is_creator = (uth == wq->wq_creator);
1253*a1e26a70SApple OSS Distributions
1254*a1e26a70SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
1255*a1e26a70SApple OSS Distributions assert(!is_creator);
1256*a1e26a70SApple OSS Distributions
1257*a1e26a70SApple OSS Distributions thread_qos_t thread_qos = uth->uu_workq_pri.qos_req;
1258*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);
1259*a1e26a70SApple OSS Distributions
1260*a1e26a70SApple OSS Distributions /* Before we get here, we always go through
1261*a1e26a70SApple OSS Distributions * workq_select_threadreq_or_park_and_unlock. If we got here, it means
1262*a1e26a70SApple OSS Distributions * that we went through the logic in workq_threadreq_select which
1263*a1e26a70SApple OSS Distributions * did the refresh for the next best cooperative qos while
1264*a1e26a70SApple OSS Distributions * excluding the current thread - we shouldn't need to do it again.
1265*a1e26a70SApple OSS Distributions */
1266*a1e26a70SApple OSS Distributions assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
1267*a1e26a70SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
1268*a1e26a70SApple OSS Distributions assert(!is_creator);
1269*a1e26a70SApple OSS Distributions
1270*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled--;
1271*a1e26a70SApple OSS Distributions }
1272*a1e26a70SApple OSS Distributions
1273*a1e26a70SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1274*a1e26a70SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
1275*a1e26a70SApple OSS Distributions wq->wq_threads_scheduled--;
1276*a1e26a70SApple OSS Distributions
1277*a1e26a70SApple OSS Distributions if (is_creator) {
1278*a1e26a70SApple OSS Distributions wq->wq_creator = NULL;
1279*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
1280*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields);
1281*a1e26a70SApple OSS Distributions }
1282*a1e26a70SApple OSS Distributions
1283*a1e26a70SApple OSS Distributions if (wq->wq_inheritor == get_machthread(uth)) {
1284*a1e26a70SApple OSS Distributions assert(wq->wq_creator == NULL);
1285*a1e26a70SApple OSS Distributions if (wq->wq_reqcount) {
1286*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
1287*a1e26a70SApple OSS Distributions } else {
1288*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
1289*a1e26a70SApple OSS Distributions }
1290*a1e26a70SApple OSS Distributions }
1291*a1e26a70SApple OSS Distributions
1292*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
1293*a1e26a70SApple OSS Distributions assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
1294*a1e26a70SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
1295*a1e26a70SApple OSS Distributions wq->wq_thidlecount++;
1296*a1e26a70SApple OSS Distributions return;
1297*a1e26a70SApple OSS Distributions }
1298*a1e26a70SApple OSS Distributions
1299*a1e26a70SApple OSS Distributions if (!is_creator) {
1300*a1e26a70SApple OSS Distributions _wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
1301*a1e26a70SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
1302*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
1303*a1e26a70SApple OSS Distributions }
1304*a1e26a70SApple OSS Distributions
1305*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.idle_stamp = now;
1306*a1e26a70SApple OSS Distributions
1307*a1e26a70SApple OSS Distributions struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
1308*a1e26a70SApple OSS Distributions uint16_t cur_idle = wq->wq_thidlecount;
1309*a1e26a70SApple OSS Distributions
1310*a1e26a70SApple OSS Distributions if (cur_idle >= wq_max_constrained_threads ||
1311*a1e26a70SApple OSS Distributions (wq->wq_thdying_count == 0 && oldest &&
1312*a1e26a70SApple OSS Distributions workq_should_kill_idle_thread(wq, oldest, now))) {
1313*a1e26a70SApple OSS Distributions /*
1314*a1e26a70SApple OSS Distributions * Immediately kill threads if we have too may of them.
1315*a1e26a70SApple OSS Distributions *
1316*a1e26a70SApple OSS Distributions * And swap "place" with the oldest one we'd have woken up.
1317*a1e26a70SApple OSS Distributions * This is a relatively desperate situation where we really
1318*a1e26a70SApple OSS Distributions * need to kill threads quickly and it's best to kill
1319*a1e26a70SApple OSS Distributions * the one that's currently on core than context switching.
1320*a1e26a70SApple OSS Distributions */
1321*a1e26a70SApple OSS Distributions if (oldest) {
1322*a1e26a70SApple OSS Distributions oldest->uu_save.uus_workq_park_data.idle_stamp = now;
1323*a1e26a70SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
1324*a1e26a70SApple OSS Distributions TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
1325*a1e26a70SApple OSS Distributions }
1326*a1e26a70SApple OSS Distributions
1327*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
1328*a1e26a70SApple OSS Distributions wq, cur_idle, 0, 0);
1329*a1e26a70SApple OSS Distributions wq->wq_thdying_count++;
1330*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
1331*a1e26a70SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
1332*a1e26a70SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
1333*a1e26a70SApple OSS Distributions __builtin_unreachable();
1334*a1e26a70SApple OSS Distributions }
1335*a1e26a70SApple OSS Distributions
1336*a1e26a70SApple OSS Distributions struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
1337*a1e26a70SApple OSS Distributions
1338*a1e26a70SApple OSS Distributions cur_idle += 1;
1339*a1e26a70SApple OSS Distributions wq->wq_thidlecount = cur_idle;
1340*a1e26a70SApple OSS Distributions
1341*a1e26a70SApple OSS Distributions if (cur_idle >= wq_death_max_load && tail &&
1342*a1e26a70SApple OSS Distributions tail->uu_save.uus_workq_park_data.has_stack) {
1343*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.has_stack = false;
1344*a1e26a70SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
1345*a1e26a70SApple OSS Distributions } else {
1346*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.has_stack = true;
1347*a1e26a70SApple OSS Distributions TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
1348*a1e26a70SApple OSS Distributions }
1349*a1e26a70SApple OSS Distributions
1350*a1e26a70SApple OSS Distributions if (!tail) {
1351*a1e26a70SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
1352*a1e26a70SApple OSS Distributions workq_death_call_schedule(wq, now + delay);
1353*a1e26a70SApple OSS Distributions }
1354*a1e26a70SApple OSS Distributions }
1355*a1e26a70SApple OSS Distributions
1356*a1e26a70SApple OSS Distributions #pragma mark thread requests
1357*a1e26a70SApple OSS Distributions
1358*a1e26a70SApple OSS Distributions static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)1359*a1e26a70SApple OSS Distributions workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
1360*a1e26a70SApple OSS Distributions {
1361*a1e26a70SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
1362*a1e26a70SApple OSS Distributions }
1363*a1e26a70SApple OSS Distributions
1364*a1e26a70SApple OSS Distributions static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)1365*a1e26a70SApple OSS Distributions workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
1366*a1e26a70SApple OSS Distributions {
1367*a1e26a70SApple OSS Distributions return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT |
1368*a1e26a70SApple OSS Distributions WORKQ_TR_FLAG_COOPERATIVE |
1369*a1e26a70SApple OSS Distributions WORKQ_TR_FLAG_PERMANENT_BIND)) == 0;
1370*a1e26a70SApple OSS Distributions }
1371*a1e26a70SApple OSS Distributions
1372*a1e26a70SApple OSS Distributions static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)1373*a1e26a70SApple OSS Distributions workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
1374*a1e26a70SApple OSS Distributions {
1375*a1e26a70SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
1376*a1e26a70SApple OSS Distributions }
1377*a1e26a70SApple OSS Distributions
1378*a1e26a70SApple OSS Distributions #define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
1379*a1e26a70SApple OSS Distributions #define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
1380*a1e26a70SApple OSS Distributions #define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)
1381*a1e26a70SApple OSS Distributions
1382*a1e26a70SApple OSS Distributions static inline int
workq_priority_for_req(workq_threadreq_t req)1383*a1e26a70SApple OSS Distributions workq_priority_for_req(workq_threadreq_t req)
1384*a1e26a70SApple OSS Distributions {
1385*a1e26a70SApple OSS Distributions thread_qos_t qos = req->tr_qos;
1386*a1e26a70SApple OSS Distributions
1387*a1e26a70SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1388*a1e26a70SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
1389*a1e26a70SApple OSS Distributions assert(trp.trp_flags & TRP_PRIORITY);
1390*a1e26a70SApple OSS Distributions return trp.trp_pri;
1391*a1e26a70SApple OSS Distributions }
1392*a1e26a70SApple OSS Distributions return thread_workq_pri_for_qos(qos);
1393*a1e26a70SApple OSS Distributions }
1394*a1e26a70SApple OSS Distributions
1395*a1e26a70SApple OSS Distributions static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue * wq,workq_threadreq_t req)1396*a1e26a70SApple OSS Distributions workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
1397*a1e26a70SApple OSS Distributions {
1398*a1e26a70SApple OSS Distributions assert(!workq_tr_is_cooperative(req->tr_flags));
1399*a1e26a70SApple OSS Distributions
1400*a1e26a70SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1401*a1e26a70SApple OSS Distributions return &wq->wq_special_queue;
1402*a1e26a70SApple OSS Distributions } else if (workq_tr_is_overcommit(req->tr_flags)) {
1403*a1e26a70SApple OSS Distributions return &wq->wq_overcommit_queue;
1404*a1e26a70SApple OSS Distributions } else {
1405*a1e26a70SApple OSS Distributions return &wq->wq_constrained_queue;
1406*a1e26a70SApple OSS Distributions }
1407*a1e26a70SApple OSS Distributions }
1408*a1e26a70SApple OSS Distributions
1409*a1e26a70SApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
1410*a1e26a70SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_internal(struct workqueue * wq,thread_qos_t qos)1411*a1e26a70SApple OSS Distributions workq_num_cooperative_threads_scheduled_to_qos_internal(struct workqueue *wq, thread_qos_t qos)
1412*a1e26a70SApple OSS Distributions {
1413*a1e26a70SApple OSS Distributions uint64_t num_cooperative_threads = 0;
1414*a1e26a70SApple OSS Distributions
1415*a1e26a70SApple OSS Distributions for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
1416*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(cur_qos);
1417*a1e26a70SApple OSS Distributions num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
1418*a1e26a70SApple OSS Distributions }
1419*a1e26a70SApple OSS Distributions
1420*a1e26a70SApple OSS Distributions return num_cooperative_threads;
1421*a1e26a70SApple OSS Distributions }
1422*a1e26a70SApple OSS Distributions
1423*a1e26a70SApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
1424*a1e26a70SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_locked(struct workqueue * wq,thread_qos_t qos)1425*a1e26a70SApple OSS Distributions workq_num_cooperative_threads_scheduled_to_qos_locked(struct workqueue *wq, thread_qos_t qos)
1426*a1e26a70SApple OSS Distributions {
1427*a1e26a70SApple OSS Distributions workq_lock_held(wq);
1428*a1e26a70SApple OSS Distributions return workq_num_cooperative_threads_scheduled_to_qos_internal(wq, qos);
1429*a1e26a70SApple OSS Distributions }
1430*a1e26a70SApple OSS Distributions
1431*a1e26a70SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue * wq)1432*a1e26a70SApple OSS Distributions workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
1433*a1e26a70SApple OSS Distributions {
1434*a1e26a70SApple OSS Distributions return workq_num_cooperative_threads_scheduled_to_qos_locked(wq, WORKQ_THREAD_QOS_MIN);
1435*a1e26a70SApple OSS Distributions }
1436*a1e26a70SApple OSS Distributions
1437*a1e26a70SApple OSS Distributions static bool
workq_has_cooperative_thread_requests(struct workqueue * wq)1438*a1e26a70SApple OSS Distributions workq_has_cooperative_thread_requests(struct workqueue *wq)
1439*a1e26a70SApple OSS Distributions {
1440*a1e26a70SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1441*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1442*a1e26a70SApple OSS Distributions if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1443*a1e26a70SApple OSS Distributions return true;
1444*a1e26a70SApple OSS Distributions }
1445*a1e26a70SApple OSS Distributions }
1446*a1e26a70SApple OSS Distributions
1447*a1e26a70SApple OSS Distributions return false;
1448*a1e26a70SApple OSS Distributions }
1449*a1e26a70SApple OSS Distributions
1450*a1e26a70SApple OSS Distributions /*
1451*a1e26a70SApple OSS Distributions * Determines the next QoS bucket we should service next in the cooperative
1452*a1e26a70SApple OSS Distributions * pool. This function will always return a QoS for cooperative pool as long as
1453*a1e26a70SApple OSS Distributions * there are requests to be serviced.
1454*a1e26a70SApple OSS Distributions *
1455*a1e26a70SApple OSS Distributions * Unlike the other thread pools, for the cooperative thread pool the schedule
1456*a1e26a70SApple OSS Distributions * counts for the various buckets in the pool affect the next best request for
1457*a1e26a70SApple OSS Distributions * it.
1458*a1e26a70SApple OSS Distributions *
1459*a1e26a70SApple OSS Distributions * This function is called in the following contexts:
1460*a1e26a70SApple OSS Distributions *
1461*a1e26a70SApple OSS Distributions * a) When determining the best thread QoS for cooperative bucket for the
1462*a1e26a70SApple OSS Distributions * creator/thread reuse
1463*a1e26a70SApple OSS Distributions *
1464*a1e26a70SApple OSS Distributions * b) Once (a) has happened and thread has bound to a thread request, figuring
1465*a1e26a70SApple OSS Distributions * out whether the next best request for this pool has changed so that creator
1466*a1e26a70SApple OSS Distributions * can be scheduled.
1467*a1e26a70SApple OSS Distributions *
1468*a1e26a70SApple OSS Distributions * Returns true if the cooperative queue's best qos changed from previous
1469*a1e26a70SApple OSS Distributions * value.
1470*a1e26a70SApple OSS Distributions */
1471*a1e26a70SApple OSS Distributions static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue * wq)1472*a1e26a70SApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
1473*a1e26a70SApple OSS Distributions {
1474*a1e26a70SApple OSS Distributions workq_lock_held(wq);
1475*a1e26a70SApple OSS Distributions
1476*a1e26a70SApple OSS Distributions thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;
1477*a1e26a70SApple OSS Distributions
1478*a1e26a70SApple OSS Distributions /* We determine the next best cooperative thread request based on the
1479*a1e26a70SApple OSS Distributions * following:
1480*a1e26a70SApple OSS Distributions *
1481*a1e26a70SApple OSS Distributions * 1. Take the MAX of the following:
1482*a1e26a70SApple OSS Distributions * a) Highest qos with pending TRs such that number of scheduled
1483*a1e26a70SApple OSS Distributions * threads so far with >= qos is < wq_max_cooperative_threads
1484*a1e26a70SApple OSS Distributions * b) Highest qos bucket with pending TRs but no scheduled threads for that bucket
1485*a1e26a70SApple OSS Distributions *
1486*a1e26a70SApple OSS Distributions * 2. If the result of (1) is UN, then we pick the highest priority amongst
1487*a1e26a70SApple OSS Distributions * pending thread requests in the pool.
1488*a1e26a70SApple OSS Distributions *
1489*a1e26a70SApple OSS Distributions */
1490*a1e26a70SApple OSS Distributions thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
1491*a1e26a70SApple OSS Distributions thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;
1492*a1e26a70SApple OSS Distributions
1493*a1e26a70SApple OSS Distributions thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;
1494*a1e26a70SApple OSS Distributions
1495*a1e26a70SApple OSS Distributions int scheduled_count_till_qos = 0;
1496*a1e26a70SApple OSS Distributions
1497*a1e26a70SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1498*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1499*a1e26a70SApple OSS Distributions uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
1500*a1e26a70SApple OSS Distributions scheduled_count_till_qos += scheduled_count_for_bucket;
1501*a1e26a70SApple OSS Distributions
1502*a1e26a70SApple OSS Distributions if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1503*a1e26a70SApple OSS Distributions if (qos > highest_qos_req) {
1504*a1e26a70SApple OSS Distributions highest_qos_req = qos;
1505*a1e26a70SApple OSS Distributions }
1506*a1e26a70SApple OSS Distributions /*
1507*a1e26a70SApple OSS Distributions * The pool isn't saturated for threads at and above this QoS, and
1508*a1e26a70SApple OSS Distributions * this qos bucket has pending requests
1509*a1e26a70SApple OSS Distributions */
1510*a1e26a70SApple OSS Distributions if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
1511*a1e26a70SApple OSS Distributions if (qos > highest_qos_req_with_width) {
1512*a1e26a70SApple OSS Distributions highest_qos_req_with_width = qos;
1513*a1e26a70SApple OSS Distributions }
1514*a1e26a70SApple OSS Distributions }
1515*a1e26a70SApple OSS Distributions
1516*a1e26a70SApple OSS Distributions /*
1517*a1e26a70SApple OSS Distributions * There are no threads scheduled for this bucket but there
1518*a1e26a70SApple OSS Distributions * is work pending, give it at least 1 thread
1519*a1e26a70SApple OSS Distributions */
1520*a1e26a70SApple OSS Distributions if (scheduled_count_for_bucket == 0) {
1521*a1e26a70SApple OSS Distributions if (qos > highest_qos_with_no_scheduled) {
1522*a1e26a70SApple OSS Distributions highest_qos_with_no_scheduled = qos;
1523*a1e26a70SApple OSS Distributions }
1524*a1e26a70SApple OSS Distributions }
1525*a1e26a70SApple OSS Distributions }
1526*a1e26a70SApple OSS Distributions }
1527*a1e26a70SApple OSS Distributions
1528*a1e26a70SApple OSS Distributions wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
1529*a1e26a70SApple OSS Distributions if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1530*a1e26a70SApple OSS Distributions wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
1531*a1e26a70SApple OSS Distributions }
1532*a1e26a70SApple OSS Distributions
1533*a1e26a70SApple OSS Distributions #if MACH_ASSERT
1534*a1e26a70SApple OSS Distributions /* Assert that if we are showing up the next best req as UN, then there
1535*a1e26a70SApple OSS Distributions * actually is no thread request in the cooperative pool buckets */
1536*a1e26a70SApple OSS Distributions if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1537*a1e26a70SApple OSS Distributions assert(!workq_has_cooperative_thread_requests(wq));
1538*a1e26a70SApple OSS Distributions }
1539*a1e26a70SApple OSS Distributions #endif
1540*a1e26a70SApple OSS Distributions
1541*a1e26a70SApple OSS Distributions return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
1542*a1e26a70SApple OSS Distributions }
1543*a1e26a70SApple OSS Distributions
1544*a1e26a70SApple OSS Distributions /*
1545*a1e26a70SApple OSS Distributions * Returns whether or not the input thread (or creator thread if uth is NULL)
1546*a1e26a70SApple OSS Distributions * should be allowed to work as part of the cooperative pool for the <input qos>
1547*a1e26a70SApple OSS Distributions * bucket.
1548*a1e26a70SApple OSS Distributions *
1549*a1e26a70SApple OSS Distributions * This function is called in a bunch of places:
1550*a1e26a70SApple OSS Distributions * a) Quantum expires for a thread and it is part of the cooperative pool
1551*a1e26a70SApple OSS Distributions * b) When trying to pick a thread request for the creator thread to
1552*a1e26a70SApple OSS Distributions * represent.
1553*a1e26a70SApple OSS Distributions * c) When a thread is trying to pick a thread request to actually bind to
1554*a1e26a70SApple OSS Distributions * and service.
1555*a1e26a70SApple OSS Distributions *
1556*a1e26a70SApple OSS Distributions * Called with workq lock held.
1557*a1e26a70SApple OSS Distributions */
1558*a1e26a70SApple OSS Distributions
1559*a1e26a70SApple OSS Distributions #define WQ_COOPERATIVE_POOL_UNSATURATED 1
1560*a1e26a70SApple OSS Distributions #define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
1561*a1e26a70SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3
1562*a1e26a70SApple OSS Distributions
1563*a1e26a70SApple OSS Distributions static bool
workq_cooperative_allowance(struct workqueue * wq,thread_qos_t qos,struct uthread * uth,bool may_start_timer)1564*a1e26a70SApple OSS Distributions workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
1565*a1e26a70SApple OSS Distributions bool may_start_timer)
1566*a1e26a70SApple OSS Distributions {
1567*a1e26a70SApple OSS Distributions workq_lock_held(wq);
1568*a1e26a70SApple OSS Distributions
1569*a1e26a70SApple OSS Distributions bool exclude_thread_as_scheduled = false;
1570*a1e26a70SApple OSS Distributions bool passed_admissions = false;
1571*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1572*a1e26a70SApple OSS Distributions
1573*a1e26a70SApple OSS Distributions if (uth && workq_thread_is_cooperative(uth)) {
1574*a1e26a70SApple OSS Distributions exclude_thread_as_scheduled = true;
1575*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
1576*a1e26a70SApple OSS Distributions }
1577*a1e26a70SApple OSS Distributions
1578*a1e26a70SApple OSS Distributions /*
1579*a1e26a70SApple OSS Distributions * We have not saturated the pool yet, let this thread continue
1580*a1e26a70SApple OSS Distributions */
1581*a1e26a70SApple OSS Distributions uint64_t total_cooperative_threads;
1582*a1e26a70SApple OSS Distributions total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
1583*a1e26a70SApple OSS Distributions if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
1584*a1e26a70SApple OSS Distributions passed_admissions = true;
1585*a1e26a70SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
1586*a1e26a70SApple OSS Distributions total_cooperative_threads, qos, passed_admissions,
1587*a1e26a70SApple OSS Distributions WQ_COOPERATIVE_POOL_UNSATURATED);
1588*a1e26a70SApple OSS Distributions goto out;
1589*a1e26a70SApple OSS Distributions }
1590*a1e26a70SApple OSS Distributions
1591*a1e26a70SApple OSS Distributions /*
1592*a1e26a70SApple OSS Distributions * Without this thread, nothing is servicing the bucket which has pending
1593*a1e26a70SApple OSS Distributions * work
1594*a1e26a70SApple OSS Distributions */
1595*a1e26a70SApple OSS Distributions uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
1596*a1e26a70SApple OSS Distributions if (bucket_scheduled == 0 &&
1597*a1e26a70SApple OSS Distributions !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1598*a1e26a70SApple OSS Distributions passed_admissions = true;
1599*a1e26a70SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
1600*a1e26a70SApple OSS Distributions total_cooperative_threads, qos, passed_admissions,
1601*a1e26a70SApple OSS Distributions WQ_COOPERATIVE_BUCKET_UNSERVICED);
1602*a1e26a70SApple OSS Distributions goto out;
1603*a1e26a70SApple OSS Distributions }
1604*a1e26a70SApple OSS Distributions
1605*a1e26a70SApple OSS Distributions /*
1606*a1e26a70SApple OSS Distributions * If number of threads at the QoS bucket >= input QoS exceeds the max we want
1607*a1e26a70SApple OSS Distributions * for the pool, deny this thread
1608*a1e26a70SApple OSS Distributions */
1609*a1e26a70SApple OSS Distributions uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos_locked(wq, qos);
1610*a1e26a70SApple OSS Distributions passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
1611*a1e26a70SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
1612*a1e26a70SApple OSS Distributions qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);
1613*a1e26a70SApple OSS Distributions
1614*a1e26a70SApple OSS Distributions if (!passed_admissions && may_start_timer) {
1615*a1e26a70SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
1616*a1e26a70SApple OSS Distributions }
1617*a1e26a70SApple OSS Distributions
1618*a1e26a70SApple OSS Distributions out:
1619*a1e26a70SApple OSS Distributions if (exclude_thread_as_scheduled) {
1620*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
1621*a1e26a70SApple OSS Distributions }
1622*a1e26a70SApple OSS Distributions return passed_admissions;
1623*a1e26a70SApple OSS Distributions }
1624*a1e26a70SApple OSS Distributions
1625*a1e26a70SApple OSS Distributions /*
1626*a1e26a70SApple OSS Distributions * returns true if the best request for the pool changed as a result of
1627*a1e26a70SApple OSS Distributions * enqueuing this thread request.
1628*a1e26a70SApple OSS Distributions */
1629*a1e26a70SApple OSS Distributions static bool
workq_threadreq_enqueue(struct workqueue * wq,workq_threadreq_t req)1630*a1e26a70SApple OSS Distributions workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
1631*a1e26a70SApple OSS Distributions {
1632*a1e26a70SApple OSS Distributions assert(req->tr_state == WORKQ_TR_STATE_NEW);
1633*a1e26a70SApple OSS Distributions
1634*a1e26a70SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_QUEUED;
1635*a1e26a70SApple OSS Distributions wq->wq_reqcount += req->tr_count;
1636*a1e26a70SApple OSS Distributions
1637*a1e26a70SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
1638*a1e26a70SApple OSS Distributions assert(wq->wq_event_manager_threadreq == NULL);
1639*a1e26a70SApple OSS Distributions assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
1640*a1e26a70SApple OSS Distributions assert(req->tr_count == 1);
1641*a1e26a70SApple OSS Distributions wq->wq_event_manager_threadreq = req;
1642*a1e26a70SApple OSS Distributions return true;
1643*a1e26a70SApple OSS Distributions }
1644*a1e26a70SApple OSS Distributions
1645*a1e26a70SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
1646*a1e26a70SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
1647*a1e26a70SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
1648*a1e26a70SApple OSS Distributions
1649*a1e26a70SApple OSS Distributions struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
1650*a1e26a70SApple OSS Distributions STAILQ_INSERT_TAIL(bucket, req, tr_link);
1651*a1e26a70SApple OSS Distributions
1652*a1e26a70SApple OSS Distributions return _wq_cooperative_queue_refresh_best_req_qos(wq);
1653*a1e26a70SApple OSS Distributions }
1654*a1e26a70SApple OSS Distributions
1655*a1e26a70SApple OSS Distributions struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);
1656*a1e26a70SApple OSS Distributions
1657*a1e26a70SApple OSS Distributions priority_queue_entry_set_sched_pri(q, &req->tr_entry,
1658*a1e26a70SApple OSS Distributions workq_priority_for_req(req), false);
1659*a1e26a70SApple OSS Distributions
1660*a1e26a70SApple OSS Distributions if (priority_queue_insert(q, &req->tr_entry)) {
1661*a1e26a70SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
1662*a1e26a70SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(wq);
1663*a1e26a70SApple OSS Distributions }
1664*a1e26a70SApple OSS Distributions return true;
1665*a1e26a70SApple OSS Distributions }
1666*a1e26a70SApple OSS Distributions return false;
1667*a1e26a70SApple OSS Distributions }
1668*a1e26a70SApple OSS Distributions
1669*a1e26a70SApple OSS Distributions /*
1670*a1e26a70SApple OSS Distributions * returns true if one of the following is true (so as to update creator if
1671*a1e26a70SApple OSS Distributions * needed):
1672*a1e26a70SApple OSS Distributions *
1673*a1e26a70SApple OSS Distributions * (a) the next highest request of the pool we dequeued the request from changed
1674*a1e26a70SApple OSS Distributions * (b) the next highest requests of the pool the current thread used to be a
1675*a1e26a70SApple OSS Distributions * part of, changed
1676*a1e26a70SApple OSS Distributions *
1677*a1e26a70SApple OSS Distributions * For overcommit, special and constrained pools, the next highest QoS for each
1678*a1e26a70SApple OSS Distributions * pool just a MAX of pending requests so tracking (a) is sufficient.
1679*a1e26a70SApple OSS Distributions *
1680*a1e26a70SApple OSS Distributions * But for cooperative thread pool, the next highest QoS for the pool depends on
1681*a1e26a70SApple OSS Distributions * schedule counts in the pool as well. So if the current thread used to be
1682*a1e26a70SApple OSS Distributions * cooperative in it's previous logical run ie (b), then that can also affect
1683*a1e26a70SApple OSS Distributions * cooperative pool's next best QoS requests.
1684*a1e26a70SApple OSS Distributions */
1685*a1e26a70SApple OSS Distributions static bool
workq_threadreq_dequeue(struct workqueue * wq,workq_threadreq_t req,bool cooperative_sched_count_changed)1686*a1e26a70SApple OSS Distributions workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
1687*a1e26a70SApple OSS Distributions bool cooperative_sched_count_changed)
1688*a1e26a70SApple OSS Distributions {
1689*a1e26a70SApple OSS Distributions wq->wq_reqcount--;
1690*a1e26a70SApple OSS Distributions
1691*a1e26a70SApple OSS Distributions bool next_highest_request_changed = false;
1692*a1e26a70SApple OSS Distributions
1693*a1e26a70SApple OSS Distributions if (--req->tr_count == 0) {
1694*a1e26a70SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
1695*a1e26a70SApple OSS Distributions assert(wq->wq_event_manager_threadreq == req);
1696*a1e26a70SApple OSS Distributions assert(req->tr_count == 0);
1697*a1e26a70SApple OSS Distributions wq->wq_event_manager_threadreq = NULL;
1698*a1e26a70SApple OSS Distributions
1699*a1e26a70SApple OSS Distributions /* If a cooperative thread was the one which picked up the manager
1700*a1e26a70SApple OSS Distributions * thread request, we need to reevaluate the cooperative pool
1701*a1e26a70SApple OSS Distributions * anyways.
1702*a1e26a70SApple OSS Distributions */
1703*a1e26a70SApple OSS Distributions if (cooperative_sched_count_changed) {
1704*a1e26a70SApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(wq);
1705*a1e26a70SApple OSS Distributions }
1706*a1e26a70SApple OSS Distributions return true;
1707*a1e26a70SApple OSS Distributions }
1708*a1e26a70SApple OSS Distributions
1709*a1e26a70SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
1710*a1e26a70SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
1711*a1e26a70SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
1712*a1e26a70SApple OSS Distributions /* Account for the fact that BG and MT are coalesced when
1713*a1e26a70SApple OSS Distributions * calculating best request for cooperative pool
1714*a1e26a70SApple OSS Distributions */
1715*a1e26a70SApple OSS Distributions assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));
1716*a1e26a70SApple OSS Distributions
1717*a1e26a70SApple OSS Distributions struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
1718*a1e26a70SApple OSS Distributions __assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);
1719*a1e26a70SApple OSS Distributions
1720*a1e26a70SApple OSS Distributions assert(head == req);
1721*a1e26a70SApple OSS Distributions STAILQ_REMOVE_HEAD(bucket, tr_link);
1722*a1e26a70SApple OSS Distributions
1723*a1e26a70SApple OSS Distributions /*
1724*a1e26a70SApple OSS Distributions * If the request we're dequeueing is cooperative, then the sched
1725*a1e26a70SApple OSS Distributions * counts definitely changed.
1726*a1e26a70SApple OSS Distributions */
1727*a1e26a70SApple OSS Distributions assert(cooperative_sched_count_changed);
1728*a1e26a70SApple OSS Distributions }
1729*a1e26a70SApple OSS Distributions
1730*a1e26a70SApple OSS Distributions /*
1731*a1e26a70SApple OSS Distributions * We want to do the cooperative pool refresh after dequeueing a
1732*a1e26a70SApple OSS Distributions * cooperative thread request if any (to combine both effects into 1
1733*a1e26a70SApple OSS Distributions * refresh operation)
1734*a1e26a70SApple OSS Distributions */
1735*a1e26a70SApple OSS Distributions if (cooperative_sched_count_changed) {
1736*a1e26a70SApple OSS Distributions next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
1737*a1e26a70SApple OSS Distributions }
1738*a1e26a70SApple OSS Distributions
1739*a1e26a70SApple OSS Distributions if (!workq_threadreq_is_cooperative(req)) {
1740*a1e26a70SApple OSS Distributions /*
1741*a1e26a70SApple OSS Distributions * All other types of requests are enqueued in priority queues
1742*a1e26a70SApple OSS Distributions */
1743*a1e26a70SApple OSS Distributions
1744*a1e26a70SApple OSS Distributions if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
1745*a1e26a70SApple OSS Distributions &req->tr_entry)) {
1746*a1e26a70SApple OSS Distributions next_highest_request_changed |= true;
1747*a1e26a70SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
1748*a1e26a70SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(wq);
1749*a1e26a70SApple OSS Distributions }
1750*a1e26a70SApple OSS Distributions }
1751*a1e26a70SApple OSS Distributions }
1752*a1e26a70SApple OSS Distributions }
1753*a1e26a70SApple OSS Distributions
1754*a1e26a70SApple OSS Distributions return next_highest_request_changed;
1755*a1e26a70SApple OSS Distributions }
1756*a1e26a70SApple OSS Distributions
1757*a1e26a70SApple OSS Distributions static void
workq_threadreq_destroy(proc_t p,workq_threadreq_t req)1758*a1e26a70SApple OSS Distributions workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
1759*a1e26a70SApple OSS Distributions {
1760*a1e26a70SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_CANCELED;
1761*a1e26a70SApple OSS Distributions if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
1762*a1e26a70SApple OSS Distributions kqueue_threadreq_cancel(p, req);
1763*a1e26a70SApple OSS Distributions } else {
1764*a1e26a70SApple OSS Distributions zfree(workq_zone_threadreq, req);
1765*a1e26a70SApple OSS Distributions }
1766*a1e26a70SApple OSS Distributions }
1767*a1e26a70SApple OSS Distributions
1768*a1e26a70SApple OSS Distributions #pragma mark workqueue thread creation thread calls
1769*a1e26a70SApple OSS Distributions
1770*a1e26a70SApple OSS Distributions static inline bool
workq_thread_call_prepost(struct workqueue * wq,uint32_t sched,uint32_t pend,uint32_t fail_mask)1771*a1e26a70SApple OSS Distributions workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
1772*a1e26a70SApple OSS Distributions uint32_t fail_mask)
1773*a1e26a70SApple OSS Distributions {
1774*a1e26a70SApple OSS Distributions uint32_t old_flags, new_flags;
1775*a1e26a70SApple OSS Distributions
1776*a1e26a70SApple OSS Distributions os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
1777*a1e26a70SApple OSS Distributions if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
1778*a1e26a70SApple OSS Distributions os_atomic_rmw_loop_give_up(return false);
1779*a1e26a70SApple OSS Distributions }
1780*a1e26a70SApple OSS Distributions if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
1781*a1e26a70SApple OSS Distributions new_flags = old_flags | pend;
1782*a1e26a70SApple OSS Distributions } else {
1783*a1e26a70SApple OSS Distributions new_flags = old_flags | sched;
1784*a1e26a70SApple OSS Distributions }
1785*a1e26a70SApple OSS Distributions });
1786*a1e26a70SApple OSS Distributions
1787*a1e26a70SApple OSS Distributions return (old_flags & WQ_PROC_SUSPENDED) == 0;
1788*a1e26a70SApple OSS Distributions }
1789*a1e26a70SApple OSS Distributions
1790*a1e26a70SApple OSS Distributions #define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1
1791*a1e26a70SApple OSS Distributions
1792*a1e26a70SApple OSS Distributions static bool
workq_schedule_delayed_thread_creation(struct workqueue * wq,int flags)1793*a1e26a70SApple OSS Distributions workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
1794*a1e26a70SApple OSS Distributions {
1795*a1e26a70SApple OSS Distributions assert(!preemption_enabled());
1796*a1e26a70SApple OSS Distributions
1797*a1e26a70SApple OSS Distributions if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
1798*a1e26a70SApple OSS Distributions WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
1799*a1e26a70SApple OSS Distributions WQ_IMMEDIATE_CALL_SCHEDULED)) {
1800*a1e26a70SApple OSS Distributions return false;
1801*a1e26a70SApple OSS Distributions }
1802*a1e26a70SApple OSS Distributions
1803*a1e26a70SApple OSS Distributions uint64_t now = mach_absolute_time();
1804*a1e26a70SApple OSS Distributions
1805*a1e26a70SApple OSS Distributions if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
1806*a1e26a70SApple OSS Distributions /* do not change the window */
1807*a1e26a70SApple OSS Distributions } else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
1808*a1e26a70SApple OSS Distributions wq->wq_timer_interval *= 2;
1809*a1e26a70SApple OSS Distributions if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
1810*a1e26a70SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
1811*a1e26a70SApple OSS Distributions }
1812*a1e26a70SApple OSS Distributions } else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
1813*a1e26a70SApple OSS Distributions wq->wq_timer_interval /= 2;
1814*a1e26a70SApple OSS Distributions if (wq->wq_timer_interval < wq_stalled_window.abstime) {
1815*a1e26a70SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
1816*a1e26a70SApple OSS Distributions }
1817*a1e26a70SApple OSS Distributions }
1818*a1e26a70SApple OSS Distributions
1819*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1820*a1e26a70SApple OSS Distributions _wq_flags(wq), wq->wq_timer_interval);
1821*a1e26a70SApple OSS Distributions
1822*a1e26a70SApple OSS Distributions thread_call_t call = wq->wq_delayed_call;
1823*a1e26a70SApple OSS Distributions uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
1824*a1e26a70SApple OSS Distributions uint64_t deadline = now + wq->wq_timer_interval;
1825*a1e26a70SApple OSS Distributions if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
1826*a1e26a70SApple OSS Distributions panic("delayed_call was already enqueued");
1827*a1e26a70SApple OSS Distributions }
1828*a1e26a70SApple OSS Distributions return true;
1829*a1e26a70SApple OSS Distributions }
1830*a1e26a70SApple OSS Distributions
1831*a1e26a70SApple OSS Distributions static void
workq_schedule_immediate_thread_creation(struct workqueue * wq)1832*a1e26a70SApple OSS Distributions workq_schedule_immediate_thread_creation(struct workqueue *wq)
1833*a1e26a70SApple OSS Distributions {
1834*a1e26a70SApple OSS Distributions assert(!preemption_enabled());
1835*a1e26a70SApple OSS Distributions
1836*a1e26a70SApple OSS Distributions if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
1837*a1e26a70SApple OSS Distributions WQ_IMMEDIATE_CALL_PENDED, 0)) {
1838*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1839*a1e26a70SApple OSS Distributions _wq_flags(wq), 0);
1840*a1e26a70SApple OSS Distributions
1841*a1e26a70SApple OSS Distributions uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
1842*a1e26a70SApple OSS Distributions if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
1843*a1e26a70SApple OSS Distributions panic("immediate_call was already enqueued");
1844*a1e26a70SApple OSS Distributions }
1845*a1e26a70SApple OSS Distributions }
1846*a1e26a70SApple OSS Distributions }
1847*a1e26a70SApple OSS Distributions
1848*a1e26a70SApple OSS Distributions void
workq_proc_suspended(struct proc * p)1849*a1e26a70SApple OSS Distributions workq_proc_suspended(struct proc *p)
1850*a1e26a70SApple OSS Distributions {
1851*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1852*a1e26a70SApple OSS Distributions
1853*a1e26a70SApple OSS Distributions if (wq) {
1854*a1e26a70SApple OSS Distributions os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
1855*a1e26a70SApple OSS Distributions }
1856*a1e26a70SApple OSS Distributions }
1857*a1e26a70SApple OSS Distributions
/*
 * Called when the owning process is resumed: clears WQ_PROC_SUSPENDED and
 * redrives any thread-call based thread creation that was pended while the
 * process was suspended.
 */
void
workq_proc_resumed(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;

	if (!wq) {
		return;
	}

	/*
	 * Atomically clear the suspended bit together with both PENDED bits;
	 * the _orig return value tells us which calls (if any) were requested
	 * while suspended and must be redriven now.
	 */
	wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
	    WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
	if ((wq_flags & WQ_EXITING) == 0) {
		disable_preemption();
		/* Immediate takes precedence over delayed if both were pended. */
		if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
			workq_schedule_immediate_thread_creation(wq);
		} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
			workq_schedule_delayed_thread_creation(wq,
			    WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
		}
		enable_preemption();
	}
}
1881*a1e26a70SApple OSS Distributions
1882*a1e26a70SApple OSS Distributions /**
1883*a1e26a70SApple OSS Distributions * returns whether lastblocked_tsp is within wq_stalled_window usecs of now
1884*a1e26a70SApple OSS Distributions */
1885*a1e26a70SApple OSS Distributions static bool
workq_thread_is_busy(uint64_t now,_Atomic uint64_t * lastblocked_tsp)1886*a1e26a70SApple OSS Distributions workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
1887*a1e26a70SApple OSS Distributions {
1888*a1e26a70SApple OSS Distributions uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
1889*a1e26a70SApple OSS Distributions if (now <= lastblocked_ts) {
1890*a1e26a70SApple OSS Distributions /*
1891*a1e26a70SApple OSS Distributions * Because the update of the timestamp when a thread blocks
1892*a1e26a70SApple OSS Distributions * isn't serialized against us looking at it (i.e. we don't hold
1893*a1e26a70SApple OSS Distributions * the workq lock), it's possible to have a timestamp that matches
1894*a1e26a70SApple OSS Distributions * the current time or that even looks to be in the future relative
1895*a1e26a70SApple OSS Distributions * to when we grabbed the current time...
1896*a1e26a70SApple OSS Distributions *
1897*a1e26a70SApple OSS Distributions * Just treat this as a busy thread since it must have just blocked.
1898*a1e26a70SApple OSS Distributions */
1899*a1e26a70SApple OSS Distributions return true;
1900*a1e26a70SApple OSS Distributions }
1901*a1e26a70SApple OSS Distributions return (now - lastblocked_ts) < wq_stalled_window.abstime;
1902*a1e26a70SApple OSS Distributions }
1903*a1e26a70SApple OSS Distributions
/*
 * Thread-call target shared by the delayed and immediate "add new threads"
 * calls; `flags` encodes which of the two fired (WQ_DELAYED_CALL_SCHEDULED
 * or WQ_IMMEDIATE_CALL_SCHEDULED) so the matching bit can be cleared.
 */
static void
workq_add_new_threads_call(void *_p, void *flags)
{
	proc_t p = _p;
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t my_flag = (uint32_t)(uintptr_t)flags;

	/*
	 * workq_exit() will set the workqueue to NULL before
	 * it cancels thread calls.
	 */
	if (!wq) {
		return;
	}

	assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
	    (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
	    wq->wq_nthreads, wq->wq_thidlecount);

	workq_lock_spin(wq);

	wq->wq_thread_call_last_run = mach_absolute_time();
	/*
	 * Clear the SCHEDULED bit for this call so it can be re-armed;
	 * release ordering publishes wq_thread_call_last_run first.
	 */
	os_atomic_andnot(&wq->wq_flags, my_flag, release);

	/* This can drop the workqueue lock, and take it again */
	workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);

	workq_unlock(wq);

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
	    wq->wq_nthreads, wq->wq_thidlecount);
}
1938*a1e26a70SApple OSS Distributions
1939*a1e26a70SApple OSS Distributions #pragma mark thread state tracking
1940*a1e26a70SApple OSS Distributions
/*
 * Scheduler callback invoked when a workqueue thread blocks or unblocks.
 * Maintains the per-QoS active-thread counts (wq_thactive) and, on block,
 * may redrive delayed thread creation when a pending constrained request
 * could now be admitted.
 */
static void
workq_sched_callback(int type, thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	struct uthread *uth = get_bsdthread_info(thread);
	struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
	thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
	wq_thactive_t old_thactive;
	bool start_timer = false;

	/* The manager bucket is not tracked by the wq_thactive counters. */
	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		return;
	}

	switch (type) {
	case SCHED_CALL_BLOCK:
		old_thactive = _wq_thactive_dec(wq, qos);
		req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);

		/*
		 * Remember the timestamp of the last thread that blocked in this
		 * bucket, it is used by admission checks to ignore one thread
		 * being inactive if this timestamp is recent enough.
		 *
		 * If we collide with another thread trying to update the
		 * last_blocked (really unlikely since another thread would have to
		 * get scheduled and then block after we start down this path), it's
		 * not a problem. Either timestamp is adequate, so no need to retry
		 */
		os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
		    thread_last_run_time(thread), relaxed);

		if (req_qos == THREAD_QOS_UNSPECIFIED) {
			/*
			 * No pending request at the moment we could unblock, move on.
			 */
		} else if (qos < req_qos) {
			/*
			 * The blocking thread is at a lower QoS than the highest currently
			 * pending constrained request, nothing has to be redriven
			 */
		} else {
			uint32_t max_busycount, old_req_count;
			old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
			    req_qos, NULL, &max_busycount);
			/*
			 * If it is possible that may_start_constrained_thread had refused
			 * admission due to being over the max concurrency, we may need to
			 * spin up a new thread.
			 *
			 * We take into account the maximum number of busy threads
			 * that can affect may_start_constrained_thread as looking at the
			 * actual number may_start_constrained_thread will see is racy.
			 *
			 * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
			 * between NCPU (4) and NCPU - 2 (2) we need to redrive.
			 */
			uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
			if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
				start_timer = workq_schedule_delayed_thread_creation(wq, 0);
			}
		}
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
			    old - 1, qos | (req_qos << 8),
			    wq->wq_reqcount << 1 | start_timer);
		}
		break;

	case SCHED_CALL_UNBLOCK:
		/*
		 * we cannot take the workqueue_lock here...
		 * an UNBLOCK can occur from a timer event which
		 * is run from an interrupt context... if the workqueue_lock
		 * is already held by this processor, we'll deadlock...
		 * the thread lock for the thread being UNBLOCKED
		 * is also held
		 */
		old_thactive = _wq_thactive_inc(wq, qos);
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
			    old + 1, qos | (req_qos << 8),
			    wq->wq_threads_scheduled);
		}
		break;
	}
}
2033*a1e26a70SApple OSS Distributions
2034*a1e26a70SApple OSS Distributions #pragma mark workq lifecycle
2035*a1e26a70SApple OSS Distributions
/*
 * Take an additional reference on a workqueue; paired with
 * workq_deallocate() or workq_deallocate_safe().
 */
void
workq_reference(struct workqueue *wq)
{
	os_ref_retain(&wq->wq_refcnt);
}
2041*a1e26a70SApple OSS Distributions
/*
 * Final teardown of a workqueue once its last reference is dropped.
 * Runs either directly from workq_deallocate() or from the
 * workq_deallocate_queue mpsc daemon (see workq_deallocate_safe()):
 * detaches and frees the turnstile, destroys the lock, frees the struct.
 */
static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct workqueue *wq;
	struct turnstile *ts;

	wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
	assert(dq == &workq_deallocate_queue);

	/* Detach the turnstile prepared at creation time and release it. */
	turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
	assert(ts);
	turnstile_cleanup();
	turnstile_deallocate(ts);

	lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
	zfree(workq_zone_workqueue, wq);
}
2060*a1e26a70SApple OSS Distributions
2061*a1e26a70SApple OSS Distributions static void
workq_deallocate(struct workqueue * wq)2062*a1e26a70SApple OSS Distributions workq_deallocate(struct workqueue *wq)
2063*a1e26a70SApple OSS Distributions {
2064*a1e26a70SApple OSS Distributions if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
2065*a1e26a70SApple OSS Distributions workq_deallocate_queue_invoke(&wq->wq_destroy_link,
2066*a1e26a70SApple OSS Distributions &workq_deallocate_queue);
2067*a1e26a70SApple OSS Distributions }
2068*a1e26a70SApple OSS Distributions }
2069*a1e26a70SApple OSS Distributions
2070*a1e26a70SApple OSS Distributions void
workq_deallocate_safe(struct workqueue * wq)2071*a1e26a70SApple OSS Distributions workq_deallocate_safe(struct workqueue *wq)
2072*a1e26a70SApple OSS Distributions {
2073*a1e26a70SApple OSS Distributions if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
2074*a1e26a70SApple OSS Distributions mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
2075*a1e26a70SApple OSS Distributions MPSC_QUEUE_DISABLE_PREEMPTION);
2076*a1e26a70SApple OSS Distributions }
2077*a1e26a70SApple OSS Distributions }
2078*a1e26a70SApple OSS Distributions
/**
 * Setup per-process state for the workqueue.
 *
 * Performs one-time initialization of the global workqueue limits, then
 * allocates and publishes the per-process struct workqueue if the process
 * does not have one yet.
 *
 * Returns 0 on success, EINVAL if the process never registered via
 * bsdthread_register (P_LREGISTER).
 */
int
workq_open(struct proc *p, __unused struct workq_open_args *uap,
    __unused int32_t *retval)
{
	struct workqueue *wq;
	int error = 0;

	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	/* One-time global setup, keyed off wq_init_constrained_limit. */
	if (wq_init_constrained_limit) {
		uint32_t limit, num_cpus = ml_wait_max_cpus();

		/*
		 * set up the limit for the constrained pool
		 * this is a virtual pool in that we don't
		 * maintain it on a separate idle and run list
		 */
		limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;

		if (limit > wq_max_constrained_threads) {
			wq_max_constrained_threads = limit;
		}

		/* Clamp the global cap to what the thactive encoding allows... */
		if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
			wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
		}
		/* ...and keep headroom below the system-wide thread maximum. */
		if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
			wq_max_threads = CONFIG_THREAD_MAX - 20;
		}

		wq_death_max_load = (uint16_t)fls(num_cpus) + 1;

		for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
			wq_max_parallelism[_wq_bucket(qos)] =
			    qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
		}

		wq_max_cooperative_threads = num_cpus;

		wq_init_constrained_limit = 0;
	}

	if (proc_get_wqptr(p) == NULL) {
		/*
		 * A FALSE return means another thread won the race to
		 * initialize the wqptr; it is set by the time we get here
		 * (the assert below relies on that).
		 */
		if (proc_init_wqptr_or_wait(p) == FALSE) {
			assert(proc_get_wqptr(p) != NULL);
			goto out;
		}

		wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);

		os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);

		// Start the event manager at the priority hinted at by the policy engine
		thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
		pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
		wq->wq_event_manager_priority = (uint32_t)pp;
		wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		wq->wq_proc = p;
		/* Torn down in workq_deallocate_queue_invoke(). */
		turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
		    TURNSTILE_WORKQS);

		TAILQ_INIT(&wq->wq_thrunlist);
		TAILQ_INIT(&wq->wq_thnewlist);
		TAILQ_INIT(&wq->wq_thidlelist);
		priority_queue_init(&wq->wq_overcommit_queue);
		priority_queue_init(&wq->wq_constrained_queue);
		priority_queue_init(&wq->wq_special_queue);
		for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
			STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
		}

		/* We are only using the delayed thread call for the constrained pool
		 * which can't have work at >= UI QoS and so we can be fine with a
		 * UI QoS thread call.
		 */
		wq->wq_delayed_call = thread_call_allocate_with_qos(
			workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_immediate_call = thread_call_allocate_with_options(
			workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_death_call = thread_call_allocate_with_options(
			workq_kill_old_threads_call, wq,
			THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);

		lck_ticket_init(&wq->wq_lock, &workq_lck_grp);

		WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);
		/* Publish last, once the workqueue is fully constructed. */
		proc_set_wqptr(p, wq);
	}
out:

	return error;
}
2179*a1e26a70SApple OSS Distributions
/*
 * Routine: workq_mark_exiting
 *
 * Function: Mark the work queue such that new threads will not be added to the
 * work queue after we return.
 *
 * Conditions: Called against the current process.
 */
void
workq_mark_exiting(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;
	workq_threadreq_t mgr_req;

	if (!wq) {
		return;
	}

	WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);

	workq_lock_spin(wq);

	/* WQ_EXITING must be set exactly once over the workqueue's lifetime. */
	wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
	if (__improbable(wq_flags & WQ_EXITING)) {
		panic("workq_mark_exiting called twice");
	}

	/*
	 * Opportunistically try to cancel thread calls that are likely in flight.
	 * workq_exit() will do the proper cleanup.
	 */
	if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_immediate_call);
	}
	if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_delayed_call);
	}
	if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_death_call);
	}

	/* Detach the manager thread request; it is cancelled below, unlocked. */
	mgr_req = wq->wq_event_manager_threadreq;
	wq->wq_event_manager_threadreq = NULL;
	wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
	wq->wq_creator = NULL;
	workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);

	workq_unlock(wq);

	if (mgr_req) {
		kqueue_threadreq_cancel(p, mgr_req);
	}
	/*
	 * No one touches the priority queues once WQ_EXITING is set.
	 * It is hence safe to do the tear down without holding any lock.
	 */
	priority_queue_destroy(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});

	WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
}
2252*a1e26a70SApple OSS Distributions
/*
 * Routine: workq_exit
 *
 * Function: clean up the work queue structure(s) now that there are no threads
 * left running inside the work queue (except possibly current_thread).
 *
 * Conditions: Called by the last thread in the process.
 * Called against current process.
 */
void
workq_exit(struct proc *p)
{
	struct workqueue *wq;
	struct uthread *uth, *tmp;

	/* Clear the wqptr first so thread calls observing NULL bail out. */
	wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
	if (wq != NULL) {
		thread_t th = current_thread();

		WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);

		if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
			/*
			 * <rdar://problem/40111515> Make sure we will no longer call the
			 * sched call, if we ever block this thread, which the cancel_wait
			 * below can do.
			 */
			thread_sched_call(th, NULL);
		}

		/*
		 * Thread calls are always scheduled by the proc itself or under the
		 * workqueue spinlock if WQ_EXITING is not yet set.
		 *
		 * Either way, when this runs, the proc has no threads left beside
		 * the one running this very code, so we know no thread call can be
		 * dispatched anymore.
		 */
		thread_call_cancel_wait(wq->wq_delayed_call);
		thread_call_cancel_wait(wq->wq_immediate_call);
		thread_call_cancel_wait(wq->wq_death_call);
		thread_call_free(wq->wq_delayed_call);
		thread_call_free(wq->wq_immediate_call);
		thread_call_free(wq->wq_death_call);

		/*
		 * Clean up workqueue data structures for threads that exited and
		 * didn't get a chance to clean up after themselves.
		 *
		 * idle/new threads should have been interrupted and died on their own
		 */
		TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
			thread_t mth = get_machthread(uth);
			thread_sched_call(mth, NULL);
			thread_deallocate(mth);
		}
		assert(TAILQ_EMPTY(&wq->wq_thnewlist));
		assert(TAILQ_EMPTY(&wq->wq_thidlelist));

		WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);

		/* Drop the initial reference taken in workq_open(). */
		workq_deallocate(wq);

		WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
	}
}
2320*a1e26a70SApple OSS Distributions
2321*a1e26a70SApple OSS Distributions
2322*a1e26a70SApple OSS Distributions #pragma mark bsd thread control
2323*a1e26a70SApple OSS Distributions
2324*a1e26a70SApple OSS Distributions bool
bsdthread_part_of_cooperative_workqueue(struct uthread * uth)2325*a1e26a70SApple OSS Distributions bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
2326*a1e26a70SApple OSS Distributions {
2327*a1e26a70SApple OSS Distributions return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
2328*a1e26a70SApple OSS Distributions (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER) &&
2329*a1e26a70SApple OSS Distributions (!workq_thread_is_permanently_bound(uth));
2330*a1e26a70SApple OSS Distributions }
2331*a1e26a70SApple OSS Distributions
2332*a1e26a70SApple OSS Distributions static bool
_pthread_priority_to_policy(pthread_priority_t priority,thread_qos_policy_data_t * data)2333*a1e26a70SApple OSS Distributions _pthread_priority_to_policy(pthread_priority_t priority,
2334*a1e26a70SApple OSS Distributions thread_qos_policy_data_t *data)
2335*a1e26a70SApple OSS Distributions {
2336*a1e26a70SApple OSS Distributions data->qos_tier = _pthread_priority_thread_qos(priority);
2337*a1e26a70SApple OSS Distributions data->tier_importance = _pthread_priority_relpri(priority);
2338*a1e26a70SApple OSS Distributions if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
2339*a1e26a70SApple OSS Distributions data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
2340*a1e26a70SApple OSS Distributions return false;
2341*a1e26a70SApple OSS Distributions }
2342*a1e26a70SApple OSS Distributions return true;
2343*a1e26a70SApple OSS Distributions }
2344*a1e26a70SApple OSS Distributions
/*
 * Handler for the BSDTHREAD_CTL_SET_SELF bsdthread_ctl() command.
 *
 * Applies a combination of operations to the calling thread, selected by
 * `flags`: unbinding from a bound kevent request
 * (WORKQ_SET_SELF_WQ_KEVENT_UNBIND), changing the thread's QoS
 * (WORKQ_SET_SELF_QOS_FLAG, optionally with a req/override split via
 * WORKQ_SET_SELF_QOS_OVERRIDE_FLAG), adopting a voucher
 * (WORKQ_SET_SELF_VOUCHER_FLAG), and switching between fixed-priority and
 * timeshare scheduling (WORKQ_SET_SELF_FIXEDPRIORITY_FLAG /
 * WORKQ_SET_SELF_TIMESHARE_FLAG).
 *
 * Each requested operation is attempted even if a previous one failed
 * (except that a failed QoS update suppresses the fixedpri/timeshare
 * change); per-operation errors accumulate in unbind_rv / qos_rv /
 * voucher_rv / fixedpri_rv and are prioritized under the "done:" label.
 */
static int
bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
    mach_port_name_t voucher, enum workq_set_self_flags flags)
{
	struct uthread *uth = get_bsdthread_info(th);
	struct workqueue *wq = proc_get_wqptr(p);

	kern_return_t kr;
	/* One result slot per operation; see "done:" for their precedence. */
	int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
	bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);

	/* This trap only ever operates on the calling thread. */
	assert(th == current_thread());
	if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
		/* Only workqueue threads can be bound to a kevent request. */
		if (!is_wq_thread) {
			unbind_rv = EINVAL;
			goto qos;
		}

		/* The manager thread cannot unbind through this path. */
		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			unbind_rv = EINVAL;
			goto qos;
		}

		workq_threadreq_t kqr = uth->uu_kqr_bound;
		if (kqr == NULL) {
			/* Not bound (anymore): nothing to undo. */
			unbind_rv = EALREADY;
			goto qos;
		}

		/* Workloop bindings cannot be severed through this path. */
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			unbind_rv = EINVAL;
			goto qos;
		}

		kqueue_threadreq_unbind(p, kqr);
	}

qos:
	if (flags & (WORKQ_SET_SELF_QOS_FLAG | WORKQ_SET_SELF_QOS_OVERRIDE_FLAG)) {
		/* The override flag is only valid together with the QoS flag. */
		assert(flags & WORKQ_SET_SELF_QOS_FLAG);

		thread_qos_policy_data_t new_policy;
		thread_qos_t qos_override = THREAD_QOS_UNSPECIFIED;

		/* Reject priorities with no QoS tier or an out-of-range relpri. */
		if (!_pthread_priority_to_policy(priority, &new_policy)) {
			qos_rv = EINVAL;
			goto voucher;
		}

		if (flags & WORKQ_SET_SELF_QOS_OVERRIDE_FLAG) {
			/*
			 * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is set, we definitely
			 * should have an override QoS in the pthread_priority_t and we should
			 * only come into this path for cooperative thread requests
			 */
			if (!_pthread_priority_has_override_qos(priority) ||
			    !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
			qos_override = _pthread_priority_thread_override_qos(priority);
		} else {
			/*
			 * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is not set, we definitely
			 * should not have an override QoS in the pthread_priority_t
			 */
			if (_pthread_priority_has_override_qos(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
		}

		if (!is_wq_thread) {
			/*
			 * Threads opted out of QoS can't change QoS
			 */
			if (!thread_has_qos_policy(th)) {
				qos_rv = EPERM;
				goto voucher;
			}
		} else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
		    uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
			/*
			 * Workqueue manager threads or threads above UI can't change QoS
			 */
			qos_rv = EINVAL;
			goto voucher;
		} else {
			/*
			 * For workqueue threads, possibly adjust buckets and redrive thread
			 * requests.
			 *
			 * Transitions allowed:
			 *
			 * overcommit --> non-overcommit
			 * overcommit --> overcommit
			 * non-overcommit --> non-overcommit
			 * non-overcommit --> overcommit (to be deprecated later)
			 * cooperative --> cooperative
			 *
			 * All other transitions aren't allowed so reject them.
			 */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}

			struct uu_workq_policy old_pri, new_pri;
			bool force_run = false;

			if (qos_override) {
				/*
				 * We're in the case of a thread clarifying that it is for eg. not IN
				 * req QoS but rather, UT req QoS with IN override. However, this can
				 * race with a concurrent override happening to the thread via
				 * workq_thread_add_dispatch_override so this needs to be
				 * synchronized with the thread mutex.
				 */
				thread_mtx_lock(th);
			}

			/* The thread mutex (when taken) is acquired before the workq lock. */
			workq_lock_spin(wq);

			old_pri = new_pri = uth->uu_workq_pri;
			new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;

			if (old_pri.qos_override < qos_override) {
				/*
				 * Since this can race with a concurrent override via
				 * workq_thread_add_dispatch_override, only adjust override value if we
				 * are higher - this is a saturating function.
				 *
				 * We should not be changing the final override values, we should simply
				 * be redistributing the current value with a different breakdown of req
				 * vs override QoS - assert to that effect. Therefore, buckets should
				 * not change.
				 */
				new_pri.qos_override = qos_override;
				assert(workq_pri_override(new_pri) == workq_pri_override(old_pri));
				assert(workq_pri_bucket(new_pri) == workq_pri_bucket(old_pri));
			}

			/* Adjust schedule counts for various types of transitions */

			/* overcommit -> non-overcommit */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
				workq_thread_set_type(uth, 0);
				wq->wq_constrained_threads_scheduled++;

				/* non-overcommit -> overcommit */
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
				workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
				/*
				 * If we were previously pinned at the constrained-thread limit,
				 * leaving the constrained pool may unblock a pending request,
				 * so force a redrive below.
				 */
				force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);

				/* cooperative -> cooperative */
			} else if (workq_thread_is_cooperative(uth)) {
				_wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_req);
				_wq_cooperative_queue_scheduled_count_inc(wq, new_pri.qos_req);

				/* We're changing schedule counts within cooperative pool, we
				 * need to refresh best cooperative QoS logic again */
				force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
			}

			/*
			 * This will set up an override on the thread if any and will also call
			 * schedule_creator if needed
			 */
			workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
			workq_unlock(wq);

			if (qos_override) {
				thread_mtx_unlock(th);
			}

			if (workq_thread_is_overcommit(uth)) {
				thread_disarm_workqueue_quantum(th);
			} else {
				/* If the thread changed QoS buckets, the quantum duration
				 * may have changed too */
				thread_arm_workqueue_quantum(th);
			}
		}

		/* Push the new QoS policy down to the scheduler layer. */
		kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
		    (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			qos_rv = EINVAL;
		}
	}

voucher:
	if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
		kr = thread_set_voucher_name(voucher);
		if (kr != KERN_SUCCESS) {
			voucher_rv = ENOENT;
			goto fixedpri;
		}
	}

fixedpri:
	/* A failed QoS change suppresses the scheduling-policy change below. */
	if (qos_rv) {
		goto done;
	}
	if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 0};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	} else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 1};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	}

done:
	/* Error precedence: both-qos-and-voucher, unbind, qos, voucher, fixedpri. */
	if (qos_rv && voucher_rv) {
		/* Both failed, give that a unique error. */
		return EBADMSG;
	}

	if (unbind_rv) {
		return unbind_rv;
	}

	if (qos_rv) {
		return qos_rv;
	}

	if (voucher_rv) {
		return voucher_rv;
	}

	if (fixedpri_rv) {
		return fixedpri_rv;
	}


	return 0;
}
2612*a1e26a70SApple OSS Distributions
2613*a1e26a70SApple OSS Distributions static int
bsdthread_add_explicit_override(proc_t p,mach_port_name_t kport,pthread_priority_t pp,user_addr_t resource)2614*a1e26a70SApple OSS Distributions bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
2615*a1e26a70SApple OSS Distributions pthread_priority_t pp, user_addr_t resource)
2616*a1e26a70SApple OSS Distributions {
2617*a1e26a70SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2618*a1e26a70SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
2619*a1e26a70SApple OSS Distributions return EINVAL;
2620*a1e26a70SApple OSS Distributions }
2621*a1e26a70SApple OSS Distributions
2622*a1e26a70SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2623*a1e26a70SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2624*a1e26a70SApple OSS Distributions if (th == THREAD_NULL) {
2625*a1e26a70SApple OSS Distributions return ESRCH;
2626*a1e26a70SApple OSS Distributions }
2627*a1e26a70SApple OSS Distributions
2628*a1e26a70SApple OSS Distributions int rv = proc_thread_qos_add_override(proc_task(p), th, 0, qos, TRUE,
2629*a1e26a70SApple OSS Distributions resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2630*a1e26a70SApple OSS Distributions
2631*a1e26a70SApple OSS Distributions thread_deallocate(th);
2632*a1e26a70SApple OSS Distributions return rv;
2633*a1e26a70SApple OSS Distributions }
2634*a1e26a70SApple OSS Distributions
2635*a1e26a70SApple OSS Distributions static int
bsdthread_remove_explicit_override(proc_t p,mach_port_name_t kport,user_addr_t resource)2636*a1e26a70SApple OSS Distributions bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
2637*a1e26a70SApple OSS Distributions user_addr_t resource)
2638*a1e26a70SApple OSS Distributions {
2639*a1e26a70SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2640*a1e26a70SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2641*a1e26a70SApple OSS Distributions if (th == THREAD_NULL) {
2642*a1e26a70SApple OSS Distributions return ESRCH;
2643*a1e26a70SApple OSS Distributions }
2644*a1e26a70SApple OSS Distributions
2645*a1e26a70SApple OSS Distributions int rv = proc_thread_qos_remove_override(proc_task(p), th, 0, resource,
2646*a1e26a70SApple OSS Distributions THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2647*a1e26a70SApple OSS Distributions
2648*a1e26a70SApple OSS Distributions thread_deallocate(th);
2649*a1e26a70SApple OSS Distributions return rv;
2650*a1e26a70SApple OSS Distributions }
2651*a1e26a70SApple OSS Distributions
/*
 * BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH: apply a dispatch QoS override at
 * the QoS encoded in `pp` to the workqueue thread named by `kport`.
 *
 * When `ulock_addr` is provided, the ulock value is read to verify that
 * the target thread still owns the lock; if ownership has changed hands
 * the override is considered stale and skipped.  Note that this function
 * returns 0 even when the override is not applied (best effort).
 */
static int
workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
    pthread_priority_t pp, user_addr_t ulock_addr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);

	thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
	if (qos_override == THREAD_QOS_UNSPECIFIED) {
		return EINVAL;
	}

	thread_t thread = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (thread == THREAD_NULL) {
		return ESRCH;
	}

	struct uthread *uth = get_bsdthread_info(thread);
	/* Dispatch overrides only apply to workqueue threads. */
	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		thread_deallocate(thread);
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
	    wq, thread_tid(thread), 1, pp);

	/*
	 * The thread mutex is held across both the ulock owner check and the
	 * override application, so this cannot race with bsdthread_set_self
	 * redistributing the thread's req/override QoS (which takes the same
	 * mutex).
	 */
	thread_mtx_lock(thread);

	if (ulock_addr) {
		uint32_t val;
		int rc;
		/*
		 * Faults are disabled so the copyin cannot block while the
		 * thread mutex is held.  If the copyin faults (rc != 0),
		 * ownership cannot be verified and the override is applied
		 * anyway; only a successful read showing a different owner
		 * skips it.
		 */
		vm_fault_disable();
		rc = copyin_atomic32(ulock_addr, &val);
		vm_fault_enable();
		if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
			goto out;
		}
	}

	workq_lock_spin(wq);

	old_pri = uth->uu_workq_pri;
	if (old_pri.qos_override >= qos_override) {
		/* Nothing to do: overrides saturate, never lower an existing one. */
	} else if (thread == current_thread()) {
		/* Self-override: update the policy and redrive bucket accounting. */
		new_pri = old_pri;
		new_pri.qos_override = qos_override;
		workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	} else {
		/*
		 * Remote thread: record the new override and, if it raises the
		 * effective override, push it to the scheduler directly.
		 */
		uth->uu_workq_pri.qos_override = qos_override;
		if (qos_override > workq_pri_override(old_pri)) {
			thread_set_workq_override(thread, qos_override);
		}
	}

	workq_unlock(wq);

out:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return 0;
}
2715*a1e26a70SApple OSS Distributions
/*
 * BSDTHREAD_CTL_QOS_OVERRIDE_RESET: clear any dispatch QoS override on
 * `thread` and redrive the workqueue bucket accounting.  The requested
 * QoS itself is left untouched.  Returns EPERM for non-workqueue threads.
 */
static int
workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);
	struct uthread *uth = get_bsdthread_info(thread);

	/* Dispatch overrides only ever exist on workqueue threads. */
	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);

	/*
	 * workq_thread_add_dispatch_override takes the thread mutex before doing the
	 * copyin to validate the drainer and apply the override. We need to do the
	 * same here. See rdar://84472518
	 */
	thread_mtx_lock(thread);

	workq_lock_spin(wq);
	old_pri = new_pri = uth->uu_workq_pri;
	/* Drop only the override component of the thread's policy. */
	new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	workq_unlock(wq);

	thread_mtx_unlock(thread);
	return 0;
}
2745*a1e26a70SApple OSS Distributions
2746*a1e26a70SApple OSS Distributions static int
workq_thread_allow_kill(__unused proc_t p,thread_t thread,bool enable)2747*a1e26a70SApple OSS Distributions workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
2748*a1e26a70SApple OSS Distributions {
2749*a1e26a70SApple OSS Distributions if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
2750*a1e26a70SApple OSS Distributions // If the thread isn't a workqueue thread, don't set the
2751*a1e26a70SApple OSS Distributions // kill_allowed bit; however, we still need to return 0
2752*a1e26a70SApple OSS Distributions // instead of an error code since this code is executed
2753*a1e26a70SApple OSS Distributions // on the abort path which needs to not depend on the
2754*a1e26a70SApple OSS Distributions // pthread_t (returning an error depends on pthread_t via
2755*a1e26a70SApple OSS Distributions // cerror_nocancel)
2756*a1e26a70SApple OSS Distributions return 0;
2757*a1e26a70SApple OSS Distributions }
2758*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2759*a1e26a70SApple OSS Distributions uth->uu_workq_pthread_kill_allowed = enable;
2760*a1e26a70SApple OSS Distributions return 0;
2761*a1e26a70SApple OSS Distributions }
2762*a1e26a70SApple OSS Distributions
2763*a1e26a70SApple OSS Distributions static int
workq_allow_sigmask(proc_t p,sigset_t mask)2764*a1e26a70SApple OSS Distributions workq_allow_sigmask(proc_t p, sigset_t mask)
2765*a1e26a70SApple OSS Distributions {
2766*a1e26a70SApple OSS Distributions if (mask & workq_threadmask) {
2767*a1e26a70SApple OSS Distributions return EINVAL;
2768*a1e26a70SApple OSS Distributions }
2769*a1e26a70SApple OSS Distributions
2770*a1e26a70SApple OSS Distributions proc_lock(p);
2771*a1e26a70SApple OSS Distributions p->p_workq_allow_sigmask |= mask;
2772*a1e26a70SApple OSS Distributions proc_unlock(p);
2773*a1e26a70SApple OSS Distributions
2774*a1e26a70SApple OSS Distributions return 0;
2775*a1e26a70SApple OSS Distributions }
2776*a1e26a70SApple OSS Distributions
2777*a1e26a70SApple OSS Distributions static int
bsdthread_get_max_parallelism(thread_qos_t qos,unsigned long flags,int * retval)2778*a1e26a70SApple OSS Distributions bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
2779*a1e26a70SApple OSS Distributions int *retval)
2780*a1e26a70SApple OSS Distributions {
2781*a1e26a70SApple OSS Distributions static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
2782*a1e26a70SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
2783*a1e26a70SApple OSS Distributions static_assert(QOS_PARALLELISM_REALTIME ==
2784*a1e26a70SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
2785*a1e26a70SApple OSS Distributions static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
2786*a1e26a70SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");
2787*a1e26a70SApple OSS Distributions
2788*a1e26a70SApple OSS Distributions if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL | QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
2789*a1e26a70SApple OSS Distributions return EINVAL;
2790*a1e26a70SApple OSS Distributions }
2791*a1e26a70SApple OSS Distributions
2792*a1e26a70SApple OSS Distributions /* No units are present */
2793*a1e26a70SApple OSS Distributions if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
2794*a1e26a70SApple OSS Distributions return ENOTSUP;
2795*a1e26a70SApple OSS Distributions }
2796*a1e26a70SApple OSS Distributions
2797*a1e26a70SApple OSS Distributions if (flags & QOS_PARALLELISM_REALTIME) {
2798*a1e26a70SApple OSS Distributions if (qos) {
2799*a1e26a70SApple OSS Distributions return EINVAL;
2800*a1e26a70SApple OSS Distributions }
2801*a1e26a70SApple OSS Distributions } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
2802*a1e26a70SApple OSS Distributions return EINVAL;
2803*a1e26a70SApple OSS Distributions }
2804*a1e26a70SApple OSS Distributions
2805*a1e26a70SApple OSS Distributions *retval = qos_max_parallelism(qos, flags);
2806*a1e26a70SApple OSS Distributions return 0;
2807*a1e26a70SApple OSS Distributions }
2808*a1e26a70SApple OSS Distributions
2809*a1e26a70SApple OSS Distributions static int
bsdthread_dispatch_apply_attr(__unused struct proc * p,thread_t thread,unsigned long flags,uint64_t value1,__unused uint64_t value2)2810*a1e26a70SApple OSS Distributions bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
2811*a1e26a70SApple OSS Distributions unsigned long flags, uint64_t value1, __unused uint64_t value2)
2812*a1e26a70SApple OSS Distributions {
2813*a1e26a70SApple OSS Distributions uint32_t apply_worker_index;
2814*a1e26a70SApple OSS Distributions kern_return_t kr;
2815*a1e26a70SApple OSS Distributions
2816*a1e26a70SApple OSS Distributions switch (flags) {
2817*a1e26a70SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
2818*a1e26a70SApple OSS Distributions apply_worker_index = (uint32_t)value1;
2819*a1e26a70SApple OSS Distributions kr = thread_shared_rsrc_policy_set(thread, apply_worker_index, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2820*a1e26a70SApple OSS Distributions /*
2821*a1e26a70SApple OSS Distributions * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
2822*a1e26a70SApple OSS Distributions * cluster which it was not eligible to execute on.
2823*a1e26a70SApple OSS Distributions */
2824*a1e26a70SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
2825*a1e26a70SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
2826*a1e26a70SApple OSS Distributions kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2827*a1e26a70SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : EINVAL;
2828*a1e26a70SApple OSS Distributions default:
2829*a1e26a70SApple OSS Distributions return EINVAL;
2830*a1e26a70SApple OSS Distributions }
2831*a1e26a70SApple OSS Distributions }
2832*a1e26a70SApple OSS Distributions
/*
 * Reject the syscall with EINVAL when an argument slot that must be
 * unused carries a non-zero value.
 */
#define ENSURE_UNUSED(arg) \
	({ if ((arg) != 0) { return EINVAL; } })
2835*a1e26a70SApple OSS Distributions
/*
 * bsdthread_ctl: kernel entry point for the bsdthread_ctl() syscall.
 *
 * Dispatches on uap->cmd to the matching QoS-override / workqueue-thread
 * control operation, forwarding uap->arg1..arg3 with per-command casts.
 * Commands that do not consume an argument reject non-zero values via
 * ENSURE_UNUSED (returns EINVAL).  Retired commands return ENOTSUP; an
 * unknown cmd returns EINVAL.
 */
int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
{
	switch (uap->cmd) {
	case BSDTHREAD_CTL_QOS_OVERRIDE_START:
		return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
		           (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_END:
		ENSURE_UNUSED(uap->arg3);
		return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
		           (user_addr_t)uap->arg2);

	case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
		return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
		           (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
		return workq_thread_reset_dispatch_override(p, current_thread());

	case BSDTHREAD_CTL_SET_SELF:
		/* Operates on the calling thread only. */
		return bsdthread_set_self(p, current_thread(),
		           (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
		           (enum workq_set_self_flags)uap->arg3);

	case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
		ENSURE_UNUSED(uap->arg3);
		/* Result is delivered to userspace through *retval. */
		return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
		           (unsigned long)uap->arg2, retval);
	case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
		ENSURE_UNUSED(uap->arg2);
		ENSURE_UNUSED(uap->arg3);
		return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
	case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
		return bsdthread_dispatch_apply_attr(p, current_thread(),
		           (unsigned long)uap->arg1, (uint64_t)uap->arg2,
		           (uint64_t)uap->arg3);
	case BSDTHREAD_CTL_WORKQ_ALLOW_SIGMASK:
		return workq_allow_sigmask(p, (int)uap->arg1);
	case BSDTHREAD_CTL_SET_QOS:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
		/* no longer supported */
		return ENOTSUP;

	default:
		return EINVAL;
	}
}
2883*a1e26a70SApple OSS Distributions
2884*a1e26a70SApple OSS Distributions #pragma mark workqueue thread manipulation
2885*a1e26a70SApple OSS Distributions
2886*a1e26a70SApple OSS Distributions static void __dead2
2887*a1e26a70SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2888*a1e26a70SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2889*a1e26a70SApple OSS Distributions
2890*a1e26a70SApple OSS Distributions static void __dead2
2891*a1e26a70SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2892*a1e26a70SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2893*a1e26a70SApple OSS Distributions
2894*a1e26a70SApple OSS Distributions static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
2895*a1e26a70SApple OSS Distributions
2896*a1e26a70SApple OSS Distributions #if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
2897*a1e26a70SApple OSS Distributions static inline uint64_t
workq_trace_req_id(workq_threadreq_t req)2898*a1e26a70SApple OSS Distributions workq_trace_req_id(workq_threadreq_t req)
2899*a1e26a70SApple OSS Distributions {
2900*a1e26a70SApple OSS Distributions struct kqworkloop *kqwl;
2901*a1e26a70SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2902*a1e26a70SApple OSS Distributions kqwl = __container_of(req, struct kqworkloop, kqwl_request);
2903*a1e26a70SApple OSS Distributions return kqwl->kqwl_dynamicid;
2904*a1e26a70SApple OSS Distributions }
2905*a1e26a70SApple OSS Distributions
2906*a1e26a70SApple OSS Distributions return VM_KERNEL_ADDRHIDE(req);
2907*a1e26a70SApple OSS Distributions }
2908*a1e26a70SApple OSS Distributions #endif
2909*a1e26a70SApple OSS Distributions
2910*a1e26a70SApple OSS Distributions /**
2911*a1e26a70SApple OSS Distributions * Entry point for libdispatch to ask for threads
2912*a1e26a70SApple OSS Distributions */
/*
 * Handle a libdispatch request for `reqcount` workqueue threads at the QoS
 * encoded in pthread priority `pp`.  Idle threads are bound eagerly to warm
 * the pool; any remainder is enqueued as a thread request for the creator
 * thread to fulfill.  Returns 0 on success or an errno
 * (EINVAL on bad arguments or conflicting flags, ENOTSUP for cooperative
 * batches of more than one).
 */
static int
workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
{
	thread_qos_t qos = _pthread_priority_thread_qos(pp);
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	int ret = 0;

	/* Reject processes without a workqueue, degenerate counts (tr_count is
	 * a uint16_t), and priorities that don't decode to a valid QoS. */
	if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
	    qos == THREAD_QOS_UNSPECIFIED) {
		ret = EINVAL;
		goto exit;
	}

	WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
	    wq, reqcount, pp, cooperative);

	/* Allocate and initialize the thread request representing this batch. */
	workq_threadreq_t req = zalloc(workq_zone_threadreq);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;
	workq_tr_flags_t tr_flags = 0;

	/* Mirror the request flags into the upcall flags userspace will see. */
	if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
		tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	}

	if (cooperative) {
		tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;

		/* Cooperative requests are limited to a single thread for now. */
		if (reqcount > 1) {
			ret = ENOTSUP;
			goto free_and_exit;
		}
	}

	/* A thread request cannot be both overcommit and cooperative */
	if (workq_tr_is_cooperative(tr_flags) &&
	    workq_tr_is_overcommit(tr_flags)) {
		ret = EINVAL;
		goto free_and_exit;
	}
	req->tr_flags = tr_flags;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
	    wq, workq_trace_req_id(req), req->tr_qos, reqcount);

	workq_lock_spin(wq);
	/* The outer do/while re-runs the whole evaluation after each new idle
	 * thread is added, since adding one may have dropped the lock and
	 * conditions may have changed. */
	do {
		if (_wq_exiting(wq)) {
			goto unlock_and_exit;
		}

		/*
		 * When userspace is asking for parallelism, wakeup up to (reqcount - 1)
		 * threads without pacing, to inform the scheduler of that workload.
		 *
		 * The last requests, or the ones that failed the admission checks are
		 * enqueued and go through the regular creator codepath.
		 *
		 * If there aren't enough threads, add one, but re-evaluate everything
		 * as conditions may now have changed.
		 */
		unpaced = reqcount - 1;

		if (reqcount > 1) {
			/* We don't handle asking for parallelism on the cooperative
			 * workqueue just yet */
			assert(!workq_threadreq_is_cooperative(req));

			/* Constrained (non-overcommit) requests are capped by the
			 * admission-control allowance at this QoS. */
			if (workq_threadreq_is_nonovercommit(req)) {
				unpaced = workq_constrained_allowance(wq, qos, NULL, false, true);
				if (unpaced >= reqcount - 1) {
					unpaced = reqcount - 1;
				}
			}
		}

		/*
		 * This path does not currently handle custom workloop parameters
		 * when creating threads for parallelism.
		 */
		assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));

		/*
		 * This is a trimmed down version of workq_threadreq_bind_and_unlock()
		 */
		while (unpaced > 0 && wq->wq_thidlecount) {
			struct uthread *uth;
			bool needs_wakeup;
			uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;

			if (workq_tr_is_overcommit(req->tr_flags)) {
				uu_flags |= UT_WORKQ_OVERCOMMIT;
			}

			uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);

			/* Account the thread as active/scheduled in this QoS bucket
			 * before it runs, all under the workqueue lock. */
			_wq_thactive_inc(wq, qos);
			wq->wq_thscheduled_count[_wq_bucket(qos)]++;
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
			wq->wq_fulfilled++;

			/* Stash the upcall parameters for the thread to pick up when
			 * it sets itself up in its continuation. */
			uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
			uth->uu_save.uus_workq_park_data.thread_request = req;
			if (needs_wakeup) {
				workq_thread_wakeup(uth);
			}
			unpaced--;
			reqcount--;
		}
	} while (unpaced && wq->wq_nthreads < wq_max_threads &&
	    (workq_add_new_idle_thread(p, wq, workq_unpark_continue,
	    false, NULL) == KERN_SUCCESS));

	if (_wq_exiting(wq)) {
		goto unlock_and_exit;
	}

	/* Whatever wasn't satisfied eagerly goes through the creator codepath. */
	req->tr_count = (uint16_t)reqcount;
	if (workq_threadreq_enqueue(wq, req)) {
		/* This can drop the workqueue lock, and take it again */
		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}
	workq_unlock(wq);
	return 0;

unlock_and_exit:
	workq_unlock(wq);
free_and_exit:
	zfree(workq_zone_threadreq, req);
exit:
	return ret;
}
3049*a1e26a70SApple OSS Distributions
/*
 * Initiate a kevent/workloop thread request on behalf of the kernel.
 *
 * Computes the effective QoS (including outside-of-QoS workloops), then
 * either rebinds the current thread synchronously (ATTEMPT_REBIND fast
 * path) or enqueues the request and possibly schedules the creator.
 * Returns false if the workqueue is exiting, true otherwise.
 */
bool
workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
    struct turnstile *workloop_ts, thread_qos_t qos,
    workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	struct uthread *uth = NULL;

	assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));

	/*
	 * For any new initialization changes done to workqueue thread request below,
	 * please also consider if they are relevant to permanently bound thread
	 * request. See workq_kern_threadreq_permanent_bind.
	 */
	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		/* Outside-of-QoS workloops carry a raw priority; map it back to a
		 * workq QoS bucket, defaulting to ABOVEUI when it doesn't map. */
		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
		qos = thread_workq_qos_for_pri(trp.trp_pri);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			qos = WORKQ_THREAD_QOS_ABOVEUI;
		}
	}

	assert(req->tr_state == WORKQ_TR_STATE_IDLE);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_count = 1;
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 1);

	if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
		/*
		 * we're called back synchronously from the context of
		 * kqueue_threadreq_unbind from within workq_thread_return()
		 * we can try to match up this thread with this request !
		 */
		uth = current_uthread();
		assert(uth->uu_kqr_bound == NULL);
	}

	workq_lock_spin(wq);
	if (_wq_exiting(wq)) {
		/* Roll the request state back so the caller can retire it. */
		req->tr_state = WORKQ_TR_STATE_IDLE;
		workq_unlock(wq);
		return false;
	}

	if (uth && workq_threadreq_admissible(wq, uth, req)) {
		/* This is the case of the rebind - we were about to park and unbind
		 * when more events came so keep the binding.
		 */
		assert(uth != wq->wq_creator);

		if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
			/* Move the active accounting to the new bucket before
			 * resetting the thread's priority. */
			_wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
		}
		/*
		 * We're called from workq_kern_threadreq_initiate()
		 * due to an unbind, with the kq req held.
		 */
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    workq_trace_req_id(req), req->tr_flags, 0);
		wq->wq_fulfilled++;

		kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
	} else {
		if (workloop_ts) {
			/* Push the workloop turnstile's priority onto the workqueue
			 * turnstile while the request waits in the queue. */
			workq_perform_turnstile_operation_locked(wq, ^{
				turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
				    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
				turnstile_update_inheritor_complete(workloop_ts,
				    TURNSTILE_INTERLOCK_HELD);
			});
		}

		bool reevaluate_creator_thread_group = false;
#if CONFIG_PREADOPT_TG
		reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif
		/* We enqueued the highest priority item or we may need to reevaluate if
		 * the creator needs a thread group pre-adoption */
		if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
			workq_schedule_creator(p, wq, flags);
		}
	}

	workq_unlock(wq);

	return true;
}
3143*a1e26a70SApple OSS Distributions
/*
 * Modify a queued kevent/workloop thread request: change its QoS and/or
 * flip it to overcommit (WORKQ_THREADREQ_MAKE_OVERCOMMIT).
 *
 * If the request is mid-bind, completes the bind instead.  Otherwise the
 * request is dequeued, updated, and re-enqueued; if it becomes (or stays)
 * the highest-priority request, the creator is rescheduled.
 */
void
workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
    thread_qos_t qos, workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	bool make_overcommit = false;

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		/* Requests outside-of-QoS shouldn't accept modify operations */
		return;
	}

	workq_lock_spin(wq);

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		/* A thread is already being bound to this request: finish the
		 * bind rather than modifying a request in flight. */
		kqueue_threadreq_bind(p, req, req->tr_thread, 0);
		workq_unlock(wq);
		return;
	}

	if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
		/* TODO (rokhinip): We come into this code path for kqwl thread
		 * requests. kqwl requests cannot be cooperative.
		 */
		assert(!workq_threadreq_is_cooperative(req));

		/* Only an actual transition counts as a change below. */
		make_overcommit = workq_threadreq_is_nonovercommit(req);
	}

	/* Nothing to do if the workqueue is going away or nothing changes. */
	if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
		workq_unlock(wq);
		return;
	}

	assert(req->tr_count == 1);
	if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
		panic("Invalid thread request (%p) state %d", req, req->tr_state);
	}

	WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 0);

	struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
	workq_threadreq_t req_max;

	/*
	 * Stage 1: Dequeue the request from its priority queue.
	 *
	 * If we dequeue the root item of the constrained priority queue,
	 * maintain the best constrained request qos invariant.
	 */
	if (priority_queue_remove(pq, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
	}

	/*
	 * Stage 2: Apply changes to the thread request
	 *
	 * If the item will not become the root of the priority queue it belongs to,
	 * then we need to wait in line, just enqueue and return quickly.
	 */
	if (__improbable(make_overcommit)) {
		/* Toggling the flag also changes which priority queue it lives in. */
		req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
		pq = workq_priority_queue_for_req(wq, req);
	}
	req->tr_qos = qos;

	req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
	if (req_max && req_max->tr_qos >= qos) {
		/* Not the new root: re-insert and leave the creator alone. */
		priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
		    workq_priority_for_req(req), false);
		priority_queue_insert(pq, &req->tr_entry);
		workq_unlock(wq);
		return;
	}

	/*
	 * Stage 3: Reevaluate whether we should run the thread request.
	 *
	 * Pretend the thread request is new again:
	 * - adjust wq_reqcount to not count it anymore.
	 * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
	 *   properly attempts a synchronous bind)
	 */
	wq->wq_reqcount--;
	req->tr_state = WORKQ_TR_STATE_NEW;

	/* We enqueued the highest priority item or we may need to reevaluate if
	 * the creator needs a thread group pre-adoption if the request got a new TG */
	bool reevaluate_creator_tg = false;

#if CONFIG_PREADOPT_TG
	reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif

	if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
		workq_schedule_creator(p, wq, flags);
	}
	workq_unlock(wq);
}
3249*a1e26a70SApple OSS Distributions
3250*a1e26a70SApple OSS Distributions void
workq_kern_bound_thread_reset_pri(workq_threadreq_t req,struct uthread * uth)3251*a1e26a70SApple OSS Distributions workq_kern_bound_thread_reset_pri(workq_threadreq_t req, struct uthread *uth)
3252*a1e26a70SApple OSS Distributions {
3253*a1e26a70SApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
3254*a1e26a70SApple OSS Distributions
3255*a1e26a70SApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS)) {
3256*a1e26a70SApple OSS Distributions /*
3257*a1e26a70SApple OSS Distributions * For requests outside-of-QoS, we set the scheduling policy and
3258*a1e26a70SApple OSS Distributions * absolute priority for the bound thread right at the initialization
3259*a1e26a70SApple OSS Distributions * time. See workq_kern_threadreq_permanent_bind.
3260*a1e26a70SApple OSS Distributions */
3261*a1e26a70SApple OSS Distributions return;
3262*a1e26a70SApple OSS Distributions }
3263*a1e26a70SApple OSS Distributions
3264*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(current_proc());
3265*a1e26a70SApple OSS Distributions if (req) {
3266*a1e26a70SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3267*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
3268*a1e26a70SApple OSS Distributions } else {
3269*a1e26a70SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
3270*a1e26a70SApple OSS Distributions if (qos > WORKQ_THREAD_QOS_CLEANUP) {
3271*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
3272*a1e26a70SApple OSS Distributions } else {
3273*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.qos = qos;
3274*a1e26a70SApple OSS Distributions }
3275*a1e26a70SApple OSS Distributions }
3276*a1e26a70SApple OSS Distributions }
3277*a1e26a70SApple OSS Distributions
/* Take the process's workqueue spinlock on behalf of kevent code. */
void
workq_kern_threadreq_lock(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	workq_lock_spin(wq);
}
3283*a1e26a70SApple OSS Distributions
/* Release the process's workqueue lock taken by workq_kern_threadreq_lock. */
void
workq_kern_threadreq_unlock(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	workq_unlock(wq);
}
3289*a1e26a70SApple OSS Distributions
/*
 * Update the turnstile inheritor of a workloop's turnstile `wl_ts`.
 *
 * If the request is mid-bind, completes the bind (without an inheritor
 * update) instead.  Otherwise the inheritor becomes: NULL when the
 * workqueue is exiting, the owner thread when one is provided, or the
 * workqueue's own turnstile.  Must be called with the workqueue lock held.
 */
void
workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
    thread_t owner, struct turnstile *wl_ts,
    turnstile_update_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	turnstile_inheritor_t inheritor;

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
	workq_lock_held(wq);

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		kqueue_threadreq_bind(p, req, req->tr_thread,
		    KQUEUE_THREADREQ_BIND_NO_INHERITOR_UPDATE);
		return;
	}

	if (_wq_exiting(wq)) {
		inheritor = TURNSTILE_INHERITOR_NULL;
	} else {
		if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d", req, req->tr_state);
		}

		if (owner) {
			inheritor = owner;
			flags |= TURNSTILE_INHERITOR_THREAD;
		} else {
			inheritor = wq->wq_turnstile;
			flags |= TURNSTILE_INHERITOR_TURNSTILE;
		}
	}

	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wl_ts, inheritor, flags);
	});
}
3328*a1e26a70SApple OSS Distributions
3329*a1e26a70SApple OSS Distributions /*
3330*a1e26a70SApple OSS Distributions * An entry point for kevent to request a newly created workqueue thread
3331*a1e26a70SApple OSS Distributions * and bind it permanently to the given workqueue thread request.
3332*a1e26a70SApple OSS Distributions *
3333*a1e26a70SApple OSS Distributions * It currently only supports fixed scheduler priority thread requests.
3334*a1e26a70SApple OSS Distributions *
3335*a1e26a70SApple OSS Distributions * The newly created thread counts towards wq_nthreads. This function returns
3336*a1e26a70SApple OSS Distributions * an error if we are above that limit. There is no concept of delayed thread
3337*a1e26a70SApple OSS Distributions * creation for such specially configured kqworkloops.
3338*a1e26a70SApple OSS Distributions *
3339*a1e26a70SApple OSS Distributions * If successful, the newly created thread will be parked in
3340*a1e26a70SApple OSS Distributions * workq_bound_thread_initialize_and_unpark_continue waiting for
3341*a1e26a70SApple OSS Distributions * new incoming events.
3342*a1e26a70SApple OSS Distributions */
3343*a1e26a70SApple OSS Distributions kern_return_t
workq_kern_threadreq_permanent_bind(struct proc * p,struct workq_threadreq_s * kqr)3344*a1e26a70SApple OSS Distributions workq_kern_threadreq_permanent_bind(struct proc *p, struct workq_threadreq_s *kqr)
3345*a1e26a70SApple OSS Distributions {
3346*a1e26a70SApple OSS Distributions kern_return_t ret = 0;
3347*a1e26a70SApple OSS Distributions thread_t new_thread = NULL;
3348*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3349*a1e26a70SApple OSS Distributions
3350*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3351*a1e26a70SApple OSS Distributions
3352*a1e26a70SApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
3353*a1e26a70SApple OSS Distributions ret = EDOM;
3354*a1e26a70SApple OSS Distributions } else {
3355*a1e26a70SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3356*a1e26a70SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
3357*a1e26a70SApple OSS Distributions /*
3358*a1e26a70SApple OSS Distributions * For requests outside-of-QoS, we fully initialize the thread
3359*a1e26a70SApple OSS Distributions * request here followed by preadopting the scheduling properties
3360*a1e26a70SApple OSS Distributions * on the newly created bound thread.
3361*a1e26a70SApple OSS Distributions */
3362*a1e26a70SApple OSS Distributions thread_qos_t qos = thread_workq_qos_for_pri(trp.trp_pri);
3363*a1e26a70SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3364*a1e26a70SApple OSS Distributions qos = WORKQ_THREAD_QOS_ABOVEUI;
3365*a1e26a70SApple OSS Distributions }
3366*a1e26a70SApple OSS Distributions kqr->tr_qos = qos;
3367*a1e26a70SApple OSS Distributions }
3368*a1e26a70SApple OSS Distributions kqr->tr_count = 1;
3369*a1e26a70SApple OSS Distributions
3370*a1e26a70SApple OSS Distributions /* workq_lock dropped and retaken around thread creation below. */
3371*a1e26a70SApple OSS Distributions ret = workq_add_new_idle_thread(p, wq,
3372*a1e26a70SApple OSS Distributions workq_bound_thread_initialize_and_unpark_continue,
3373*a1e26a70SApple OSS Distributions true, &new_thread);
3374*a1e26a70SApple OSS Distributions if (ret == KERN_SUCCESS) {
3375*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(new_thread);
3376*a1e26a70SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3377*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, kqr, /*unpark*/ true);
3378*a1e26a70SApple OSS Distributions }
3379*a1e26a70SApple OSS Distributions /*
3380*a1e26a70SApple OSS Distributions * The newly created thread goes through a full bind to the kqwl
3381*a1e26a70SApple OSS Distributions * right upon creation.
3382*a1e26a70SApple OSS Distributions * It then falls back to soft bind/unbind upon wakeup/park.
3383*a1e26a70SApple OSS Distributions */
3384*a1e26a70SApple OSS Distributions kqueue_threadreq_bind_prepost(p, kqr, uth);
3385*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_PERMANENT_BIND;
3386*a1e26a70SApple OSS Distributions }
3387*a1e26a70SApple OSS Distributions }
3388*a1e26a70SApple OSS Distributions
3389*a1e26a70SApple OSS Distributions workq_unlock(wq);
3390*a1e26a70SApple OSS Distributions
3391*a1e26a70SApple OSS Distributions if (ret == KERN_SUCCESS) {
3392*a1e26a70SApple OSS Distributions kqueue_threadreq_bind_commit(p, new_thread);
3393*a1e26a70SApple OSS Distributions }
3394*a1e26a70SApple OSS Distributions return ret;
3395*a1e26a70SApple OSS Distributions }
3396*a1e26a70SApple OSS Distributions
3397*a1e26a70SApple OSS Distributions /*
3398*a1e26a70SApple OSS Distributions * Called with kqlock held. It does not need to take the process wide
3399*a1e26a70SApple OSS Distributions * global workq lock -> making it faster.
3400*a1e26a70SApple OSS Distributions */
3401*a1e26a70SApple OSS Distributions void
workq_kern_bound_thread_wakeup(struct workq_threadreq_s * kqr)3402*a1e26a70SApple OSS Distributions workq_kern_bound_thread_wakeup(struct workq_threadreq_s *kqr)
3403*a1e26a70SApple OSS Distributions {
3404*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3405*a1e26a70SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
3406*a1e26a70SApple OSS Distributions
3407*a1e26a70SApple OSS Distributions /*
3408*a1e26a70SApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3409*a1e26a70SApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3410*a1e26a70SApple OSS Distributions */
3411*a1e26a70SApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING)) == 0);
3412*a1e26a70SApple OSS Distributions
3413*a1e26a70SApple OSS Distributions if (trp.trp_flags & TRP_RELEASED) {
3414*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
3415*a1e26a70SApple OSS Distributions } else {
3416*a1e26a70SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_RUNNING;
3417*a1e26a70SApple OSS Distributions }
3418*a1e26a70SApple OSS Distributions
3419*a1e26a70SApple OSS Distributions workq_thread_wakeup(uth);
3420*a1e26a70SApple OSS Distributions }
3421*a1e26a70SApple OSS Distributions
3422*a1e26a70SApple OSS Distributions /*
3423*a1e26a70SApple OSS Distributions * Called with kqlock held. Dropped before parking.
3424*a1e26a70SApple OSS Distributions * It does not need to take process wide global workqueue
3425*a1e26a70SApple OSS Distributions * lock -> making it faster.
3426*a1e26a70SApple OSS Distributions */
3427*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
3428*a1e26a70SApple OSS Distributions void
workq_kern_bound_thread_park(struct workq_threadreq_s * kqr)3429*a1e26a70SApple OSS Distributions workq_kern_bound_thread_park(struct workq_threadreq_s *kqr)
3430*a1e26a70SApple OSS Distributions {
3431*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3432*a1e26a70SApple OSS Distributions assert(uth == current_uthread());
3433*a1e26a70SApple OSS Distributions
3434*a1e26a70SApple OSS Distributions /*
3435*a1e26a70SApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3436*a1e26a70SApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3437*a1e26a70SApple OSS Distributions */
3438*a1e26a70SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING);
3439*a1e26a70SApple OSS Distributions
3440*a1e26a70SApple OSS Distributions thread_disarm_workqueue_quantum(get_machthread(uth));
3441*a1e26a70SApple OSS Distributions
3442*a1e26a70SApple OSS Distributions /*
3443*a1e26a70SApple OSS Distributions * TODO (pavhad) We could do the reusable userspace stack performance
3444*a1e26a70SApple OSS Distributions * optimization here.
3445*a1e26a70SApple OSS Distributions */
3446*a1e26a70SApple OSS Distributions
3447*a1e26a70SApple OSS Distributions kqworkloop_bound_thread_park_prepost(kqr);
3448*a1e26a70SApple OSS Distributions /* KQ_SLEEP bit is set and kqlock is dropped. */
3449*a1e26a70SApple OSS Distributions
3450*a1e26a70SApple OSS Distributions __assert_only kern_return_t kr;
3451*a1e26a70SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3452*a1e26a70SApple OSS Distributions assert(kr == KERN_SUCCESS);
3453*a1e26a70SApple OSS Distributions
3454*a1e26a70SApple OSS Distributions kqworkloop_bound_thread_park_commit(kqr,
3455*a1e26a70SApple OSS Distributions workq_parked_wait_event(uth), workq_bound_thread_unpark_continue);
3456*a1e26a70SApple OSS Distributions
3457*a1e26a70SApple OSS Distributions __builtin_unreachable();
3458*a1e26a70SApple OSS Distributions }
3459*a1e26a70SApple OSS Distributions
3460*a1e26a70SApple OSS Distributions /*
3461*a1e26a70SApple OSS Distributions * To terminate the permenantly bound workqueue thread. It unbinds itself
3462*a1e26a70SApple OSS Distributions * with the kqwl during uthread_cleanup -> kqueue_threadreq_unbind.
3463*a1e26a70SApple OSS Distributions * It is also when it will release its reference on the kqwl.
3464*a1e26a70SApple OSS Distributions */
3465*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
3466*a1e26a70SApple OSS Distributions void
workq_kern_bound_thread_terminate(struct workq_threadreq_s * kqr)3467*a1e26a70SApple OSS Distributions workq_kern_bound_thread_terminate(struct workq_threadreq_s *kqr)
3468*a1e26a70SApple OSS Distributions {
3469*a1e26a70SApple OSS Distributions proc_t p = current_proc();
3470*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3471*a1e26a70SApple OSS Distributions uint16_t uu_workq_flags_orig;
3472*a1e26a70SApple OSS Distributions
3473*a1e26a70SApple OSS Distributions assert(uth == current_uthread());
3474*a1e26a70SApple OSS Distributions
3475*a1e26a70SApple OSS Distributions /*
3476*a1e26a70SApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3477*a1e26a70SApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3478*a1e26a70SApple OSS Distributions */
3479*a1e26a70SApple OSS Distributions kqworkloop_bound_thread_terminate(kqr, &uu_workq_flags_orig);
3480*a1e26a70SApple OSS Distributions
3481*a1e26a70SApple OSS Distributions if (uu_workq_flags_orig & UT_WORKQ_WORK_INTERVAL_JOINED) {
3482*a1e26a70SApple OSS Distributions __assert_only kern_return_t kr;
3483*a1e26a70SApple OSS Distributions kr = kern_work_interval_join(get_machthread(uth), MACH_PORT_NULL);
3484*a1e26a70SApple OSS Distributions /* The bound thread un-joins the work interval and drops its +1 ref. */
3485*a1e26a70SApple OSS Distributions assert(kr == KERN_SUCCESS);
3486*a1e26a70SApple OSS Distributions }
3487*a1e26a70SApple OSS Distributions
3488*a1e26a70SApple OSS Distributions /*
3489*a1e26a70SApple OSS Distributions * Drop the voucher now that we are on our way to termination.
3490*a1e26a70SApple OSS Distributions */
3491*a1e26a70SApple OSS Distributions __assert_only kern_return_t kr;
3492*a1e26a70SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3493*a1e26a70SApple OSS Distributions assert(kr == KERN_SUCCESS);
3494*a1e26a70SApple OSS Distributions
3495*a1e26a70SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
3496*a1e26a70SApple OSS Distributions upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
3497*a1e26a70SApple OSS Distributions WQ_FLAG_THREAD_PRIO_QOS;
3498*a1e26a70SApple OSS Distributions
3499*a1e26a70SApple OSS Distributions thread_t th = get_machthread(uth);
3500*a1e26a70SApple OSS Distributions vm_map_t vmap = get_task_map(proc_task(p));
3501*a1e26a70SApple OSS Distributions
3502*a1e26a70SApple OSS Distributions if ((uu_workq_flags_orig & UT_WORKQ_NEW) == 0) {
3503*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_REUSE;
3504*a1e26a70SApple OSS Distributions }
3505*a1e26a70SApple OSS Distributions
3506*a1e26a70SApple OSS Distributions pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
3507*a1e26a70SApple OSS Distributions uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, upcall_flags);
3508*a1e26a70SApple OSS Distributions __builtin_unreachable();
3509*a1e26a70SApple OSS Distributions }
3510*a1e26a70SApple OSS Distributions
3511*a1e26a70SApple OSS Distributions void
workq_kern_threadreq_redrive(struct proc * p,workq_kern_threadreq_flags_t flags)3512*a1e26a70SApple OSS Distributions workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
3513*a1e26a70SApple OSS Distributions {
3514*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3515*a1e26a70SApple OSS Distributions
3516*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3517*a1e26a70SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3518*a1e26a70SApple OSS Distributions workq_unlock(wq);
3519*a1e26a70SApple OSS Distributions }
3520*a1e26a70SApple OSS Distributions
3521*a1e26a70SApple OSS Distributions /*
3522*a1e26a70SApple OSS Distributions * Always called at AST by the thread on itself
3523*a1e26a70SApple OSS Distributions *
3524*a1e26a70SApple OSS Distributions * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
3525*a1e26a70SApple OSS Distributions * on what the thread should do next. The TSD value is always set by the thread
3526*a1e26a70SApple OSS Distributions * on itself in the kernel and cleared either by userspace when it acks the TSD
3527*a1e26a70SApple OSS Distributions * value and takes action, or by the thread in the kernel when the quantum
3528*a1e26a70SApple OSS Distributions * expires again.
3529*a1e26a70SApple OSS Distributions */
3530*a1e26a70SApple OSS Distributions void
workq_kern_quantum_expiry_reevaluate(proc_t proc,thread_t thread)3531*a1e26a70SApple OSS Distributions workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
3532*a1e26a70SApple OSS Distributions {
3533*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
3534*a1e26a70SApple OSS Distributions
3535*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3536*a1e26a70SApple OSS Distributions return;
3537*a1e26a70SApple OSS Distributions }
3538*a1e26a70SApple OSS Distributions
3539*a1e26a70SApple OSS Distributions if (!thread_supports_cooperative_workqueue(thread)) {
3540*a1e26a70SApple OSS Distributions panic("Quantum expired for thread that doesn't support cooperative workqueue");
3541*a1e26a70SApple OSS Distributions }
3542*a1e26a70SApple OSS Distributions
3543*a1e26a70SApple OSS Distributions thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
3544*a1e26a70SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3545*a1e26a70SApple OSS Distributions panic("Thread should not have workq bucket of QoS UN");
3546*a1e26a70SApple OSS Distributions }
3547*a1e26a70SApple OSS Distributions
3548*a1e26a70SApple OSS Distributions assert(thread_has_expired_workqueue_quantum(thread, false));
3549*a1e26a70SApple OSS Distributions
3550*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(proc);
3551*a1e26a70SApple OSS Distributions assert(wq != NULL);
3552*a1e26a70SApple OSS Distributions
3553*a1e26a70SApple OSS Distributions /*
3554*a1e26a70SApple OSS Distributions * For starters, we're just going to evaluate and see if we need to narrow
3555*a1e26a70SApple OSS Distributions * the pool and tell this thread to park if needed. In the future, we'll
3556*a1e26a70SApple OSS Distributions * evaluate and convey other workqueue state information like needing to
3557*a1e26a70SApple OSS Distributions * pump kevents, etc.
3558*a1e26a70SApple OSS Distributions */
3559*a1e26a70SApple OSS Distributions uint64_t flags = 0;
3560*a1e26a70SApple OSS Distributions
3561*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3562*a1e26a70SApple OSS Distributions
3563*a1e26a70SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
3564*a1e26a70SApple OSS Distributions if (!workq_cooperative_allowance(wq, qos, uth, false)) {
3565*a1e26a70SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3566*a1e26a70SApple OSS Distributions } else {
3567*a1e26a70SApple OSS Distributions /* In the future, when we have kevent hookups for the cooperative
3568*a1e26a70SApple OSS Distributions * pool, we need fancier logic for what userspace should do. But
3569*a1e26a70SApple OSS Distributions * right now, only userspace thread requests exist - so we'll just
3570*a1e26a70SApple OSS Distributions * tell userspace to shuffle work items */
3571*a1e26a70SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
3572*a1e26a70SApple OSS Distributions }
3573*a1e26a70SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
3574*a1e26a70SApple OSS Distributions if (!workq_constrained_allowance(wq, qos, uth, false, false)) {
3575*a1e26a70SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3576*a1e26a70SApple OSS Distributions }
3577*a1e26a70SApple OSS Distributions }
3578*a1e26a70SApple OSS Distributions workq_unlock(wq);
3579*a1e26a70SApple OSS Distributions
3580*a1e26a70SApple OSS Distributions WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);
3581*a1e26a70SApple OSS Distributions
3582*a1e26a70SApple OSS Distributions kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);
3583*a1e26a70SApple OSS Distributions
3584*a1e26a70SApple OSS Distributions /* We have conveyed to userspace about what it needs to do upon quantum
3585*a1e26a70SApple OSS Distributions * expiry, now rearm the workqueue quantum again */
3586*a1e26a70SApple OSS Distributions thread_arm_workqueue_quantum(get_machthread(uth));
3587*a1e26a70SApple OSS Distributions }
3588*a1e26a70SApple OSS Distributions
3589*a1e26a70SApple OSS Distributions void
workq_schedule_creator_turnstile_redrive(struct workqueue * wq,bool locked)3590*a1e26a70SApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3591*a1e26a70SApple OSS Distributions {
3592*a1e26a70SApple OSS Distributions if (locked) {
3593*a1e26a70SApple OSS Distributions workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3594*a1e26a70SApple OSS Distributions } else {
3595*a1e26a70SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
3596*a1e26a70SApple OSS Distributions }
3597*a1e26a70SApple OSS Distributions }
3598*a1e26a70SApple OSS Distributions
3599*a1e26a70SApple OSS Distributions static int
workq_thread_return(struct proc * p,struct workq_kernreturn_args * uap,struct workqueue * wq)3600*a1e26a70SApple OSS Distributions workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
3601*a1e26a70SApple OSS Distributions struct workqueue *wq)
3602*a1e26a70SApple OSS Distributions {
3603*a1e26a70SApple OSS Distributions thread_t th = current_thread();
3604*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
3605*a1e26a70SApple OSS Distributions workq_threadreq_t kqr = uth->uu_kqr_bound;
3606*a1e26a70SApple OSS Distributions workq_threadreq_param_t trp = { };
3607*a1e26a70SApple OSS Distributions int nevents = uap->affinity, error;
3608*a1e26a70SApple OSS Distributions user_addr_t eventlist = uap->item;
3609*a1e26a70SApple OSS Distributions
3610*a1e26a70SApple OSS Distributions if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
3611*a1e26a70SApple OSS Distributions (uth->uu_workq_flags & UT_WORKQ_DYING)) {
3612*a1e26a70SApple OSS Distributions return EINVAL;
3613*a1e26a70SApple OSS Distributions }
3614*a1e26a70SApple OSS Distributions
3615*a1e26a70SApple OSS Distributions if (eventlist && nevents && kqr == NULL) {
3616*a1e26a70SApple OSS Distributions return EINVAL;
3617*a1e26a70SApple OSS Distributions }
3618*a1e26a70SApple OSS Distributions
3619*a1e26a70SApple OSS Distributions /*
3620*a1e26a70SApple OSS Distributions * Reset signal mask on the workqueue thread to default state,
3621*a1e26a70SApple OSS Distributions * but do not touch any signals that are marked for preservation.
3622*a1e26a70SApple OSS Distributions */
3623*a1e26a70SApple OSS Distributions sigset_t resettable = uth->uu_sigmask & ~p->p_workq_allow_sigmask;
3624*a1e26a70SApple OSS Distributions if (resettable != (sigset_t)~workq_threadmask) {
3625*a1e26a70SApple OSS Distributions proc_lock(p);
3626*a1e26a70SApple OSS Distributions uth->uu_sigmask |= ~workq_threadmask & ~p->p_workq_allow_sigmask;
3627*a1e26a70SApple OSS Distributions proc_unlock(p);
3628*a1e26a70SApple OSS Distributions }
3629*a1e26a70SApple OSS Distributions
3630*a1e26a70SApple OSS Distributions if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
3631*a1e26a70SApple OSS Distributions /*
3632*a1e26a70SApple OSS Distributions * Ensure we store the threadreq param before unbinding
3633*a1e26a70SApple OSS Distributions * the kqr from this thread.
3634*a1e26a70SApple OSS Distributions */
3635*a1e26a70SApple OSS Distributions trp = kqueue_threadreq_workloop_param(kqr);
3636*a1e26a70SApple OSS Distributions }
3637*a1e26a70SApple OSS Distributions
3638*a1e26a70SApple OSS Distributions if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_PERMANENT_BIND) {
3639*a1e26a70SApple OSS Distributions goto handle_stack_events;
3640*a1e26a70SApple OSS Distributions }
3641*a1e26a70SApple OSS Distributions
3642*a1e26a70SApple OSS Distributions /*
3643*a1e26a70SApple OSS Distributions * Freeze the base pri while we decide the fate of this thread.
3644*a1e26a70SApple OSS Distributions *
3645*a1e26a70SApple OSS Distributions * Either:
3646*a1e26a70SApple OSS Distributions * - we return to user and kevent_cleanup will have unfrozen the base pri,
3647*a1e26a70SApple OSS Distributions * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
3648*a1e26a70SApple OSS Distributions */
3649*a1e26a70SApple OSS Distributions thread_freeze_base_pri(th);
3650*a1e26a70SApple OSS Distributions
3651*a1e26a70SApple OSS Distributions handle_stack_events:
3652*a1e26a70SApple OSS Distributions
3653*a1e26a70SApple OSS Distributions if (kqr) {
3654*a1e26a70SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
3655*a1e26a70SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
3656*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
3657*a1e26a70SApple OSS Distributions } else {
3658*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_KEVENT;
3659*a1e26a70SApple OSS Distributions }
3660*a1e26a70SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
3661*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
3662*a1e26a70SApple OSS Distributions } else {
3663*a1e26a70SApple OSS Distributions if (workq_thread_is_overcommit(uth)) {
3664*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
3665*a1e26a70SApple OSS Distributions }
3666*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
3667*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
3668*a1e26a70SApple OSS Distributions } else {
3669*a1e26a70SApple OSS Distributions upcall_flags |= uth->uu_workq_pri.qos_req |
3670*a1e26a70SApple OSS Distributions WQ_FLAG_THREAD_PRIO_QOS;
3671*a1e26a70SApple OSS Distributions }
3672*a1e26a70SApple OSS Distributions }
3673*a1e26a70SApple OSS Distributions error = pthread_functions->workq_handle_stack_events(p, th,
3674*a1e26a70SApple OSS Distributions get_task_map(proc_task(p)), uth->uu_workq_stackaddr,
3675*a1e26a70SApple OSS Distributions uth->uu_workq_thport, eventlist, nevents, upcall_flags);
3676*a1e26a70SApple OSS Distributions if (error) {
3677*a1e26a70SApple OSS Distributions assert(uth->uu_kqr_bound == kqr);
3678*a1e26a70SApple OSS Distributions return error;
3679*a1e26a70SApple OSS Distributions }
3680*a1e26a70SApple OSS Distributions
3681*a1e26a70SApple OSS Distributions // pthread is supposed to pass KEVENT_FLAG_PARKING here
3682*a1e26a70SApple OSS Distributions // which should cause the above call to either:
3683*a1e26a70SApple OSS Distributions // - not return
3684*a1e26a70SApple OSS Distributions // - return an error
3685*a1e26a70SApple OSS Distributions // - return 0 and have unbound properly
3686*a1e26a70SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3687*a1e26a70SApple OSS Distributions }
3688*a1e26a70SApple OSS Distributions
3689*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);
3690*a1e26a70SApple OSS Distributions
3691*a1e26a70SApple OSS Distributions thread_sched_call(th, NULL);
3692*a1e26a70SApple OSS Distributions thread_will_park_or_terminate(th);
3693*a1e26a70SApple OSS Distributions #if CONFIG_WORKLOOP_DEBUG
3694*a1e26a70SApple OSS Distributions UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
3695*a1e26a70SApple OSS Distributions #endif
3696*a1e26a70SApple OSS Distributions
3697*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3698*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
3699*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
3700*a1e26a70SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(p, wq, uth,
3701*a1e26a70SApple OSS Distributions WQ_SETUP_CLEAR_VOUCHER);
3702*a1e26a70SApple OSS Distributions __builtin_unreachable();
3703*a1e26a70SApple OSS Distributions }
3704*a1e26a70SApple OSS Distributions
3705*a1e26a70SApple OSS Distributions /**
3706*a1e26a70SApple OSS Distributions * Multiplexed call to interact with the workqueue mechanism
3707*a1e26a70SApple OSS Distributions */
3708*a1e26a70SApple OSS Distributions int
workq_kernreturn(struct proc * p,struct workq_kernreturn_args * uap,int32_t * retval)3709*a1e26a70SApple OSS Distributions workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
3710*a1e26a70SApple OSS Distributions {
3711*a1e26a70SApple OSS Distributions int options = uap->options;
3712*a1e26a70SApple OSS Distributions int arg2 = uap->affinity;
3713*a1e26a70SApple OSS Distributions int arg3 = uap->prio;
3714*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
3715*a1e26a70SApple OSS Distributions int error = 0;
3716*a1e26a70SApple OSS Distributions
3717*a1e26a70SApple OSS Distributions if ((p->p_lflag & P_LREGISTER) == 0) {
3718*a1e26a70SApple OSS Distributions return EINVAL;
3719*a1e26a70SApple OSS Distributions }
3720*a1e26a70SApple OSS Distributions
3721*a1e26a70SApple OSS Distributions switch (options) {
3722*a1e26a70SApple OSS Distributions case WQOPS_QUEUE_NEWSPISUPP: {
3723*a1e26a70SApple OSS Distributions /*
3724*a1e26a70SApple OSS Distributions * arg2 = offset of serialno into dispatch queue
3725*a1e26a70SApple OSS Distributions * arg3 = kevent support
3726*a1e26a70SApple OSS Distributions */
3727*a1e26a70SApple OSS Distributions int offset = arg2;
3728*a1e26a70SApple OSS Distributions if (arg3 & 0x01) {
3729*a1e26a70SApple OSS Distributions // If we get here, then userspace has indicated support for kevent delivery.
3730*a1e26a70SApple OSS Distributions }
3731*a1e26a70SApple OSS Distributions
3732*a1e26a70SApple OSS Distributions p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
3733*a1e26a70SApple OSS Distributions break;
3734*a1e26a70SApple OSS Distributions }
3735*a1e26a70SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS: {
3736*a1e26a70SApple OSS Distributions /*
3737*a1e26a70SApple OSS Distributions * arg2 = number of threads to start
3738*a1e26a70SApple OSS Distributions * arg3 = priority
3739*a1e26a70SApple OSS Distributions */
3740*a1e26a70SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, false);
3741*a1e26a70SApple OSS Distributions break;
3742*a1e26a70SApple OSS Distributions }
3743*a1e26a70SApple OSS Distributions /* For requesting threads for the cooperative pool */
3744*a1e26a70SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS2: {
3745*a1e26a70SApple OSS Distributions /*
3746*a1e26a70SApple OSS Distributions * arg2 = number of threads to start
3747*a1e26a70SApple OSS Distributions * arg3 = priority
3748*a1e26a70SApple OSS Distributions */
3749*a1e26a70SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, true);
3750*a1e26a70SApple OSS Distributions break;
3751*a1e26a70SApple OSS Distributions }
3752*a1e26a70SApple OSS Distributions case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
3753*a1e26a70SApple OSS Distributions /*
3754*a1e26a70SApple OSS Distributions * arg2 = priority for the manager thread
3755*a1e26a70SApple OSS Distributions *
3756*a1e26a70SApple OSS Distributions * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
3757*a1e26a70SApple OSS Distributions * the low bits of the value contains a scheduling priority
3758*a1e26a70SApple OSS Distributions * instead of a QOS value
3759*a1e26a70SApple OSS Distributions */
3760*a1e26a70SApple OSS Distributions pthread_priority_t pri = arg2;
3761*a1e26a70SApple OSS Distributions
3762*a1e26a70SApple OSS Distributions if (wq == NULL) {
3763*a1e26a70SApple OSS Distributions error = EINVAL;
3764*a1e26a70SApple OSS Distributions break;
3765*a1e26a70SApple OSS Distributions }
3766*a1e26a70SApple OSS Distributions
3767*a1e26a70SApple OSS Distributions /*
3768*a1e26a70SApple OSS Distributions * Normalize the incoming priority so that it is ordered numerically.
3769*a1e26a70SApple OSS Distributions */
3770*a1e26a70SApple OSS Distributions if (_pthread_priority_has_sched_pri(pri)) {
3771*a1e26a70SApple OSS Distributions pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
3772*a1e26a70SApple OSS Distributions _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
3773*a1e26a70SApple OSS Distributions } else {
3774*a1e26a70SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pri);
3775*a1e26a70SApple OSS Distributions int relpri = _pthread_priority_relpri(pri);
3776*a1e26a70SApple OSS Distributions if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
3777*a1e26a70SApple OSS Distributions qos == THREAD_QOS_UNSPECIFIED) {
3778*a1e26a70SApple OSS Distributions error = EINVAL;
3779*a1e26a70SApple OSS Distributions break;
3780*a1e26a70SApple OSS Distributions }
3781*a1e26a70SApple OSS Distributions pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
3782*a1e26a70SApple OSS Distributions }
3783*a1e26a70SApple OSS Distributions
3784*a1e26a70SApple OSS Distributions /*
3785*a1e26a70SApple OSS Distributions * If userspace passes a scheduling priority, that wins over any QoS.
3786*a1e26a70SApple OSS Distributions * Userspace should takes care not to lower the priority this way.
3787*a1e26a70SApple OSS Distributions */
3788*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3789*a1e26a70SApple OSS Distributions if (wq->wq_event_manager_priority < (uint32_t)pri) {
3790*a1e26a70SApple OSS Distributions wq->wq_event_manager_priority = (uint32_t)pri;
3791*a1e26a70SApple OSS Distributions }
3792*a1e26a70SApple OSS Distributions workq_unlock(wq);
3793*a1e26a70SApple OSS Distributions break;
3794*a1e26a70SApple OSS Distributions }
3795*a1e26a70SApple OSS Distributions case WQOPS_THREAD_KEVENT_RETURN:
3796*a1e26a70SApple OSS Distributions case WQOPS_THREAD_WORKLOOP_RETURN:
3797*a1e26a70SApple OSS Distributions case WQOPS_THREAD_RETURN: {
3798*a1e26a70SApple OSS Distributions error = workq_thread_return(p, uap, wq);
3799*a1e26a70SApple OSS Distributions break;
3800*a1e26a70SApple OSS Distributions }
3801*a1e26a70SApple OSS Distributions
3802*a1e26a70SApple OSS Distributions case WQOPS_SHOULD_NARROW: {
3803*a1e26a70SApple OSS Distributions /*
3804*a1e26a70SApple OSS Distributions * arg2 = priority to test
3805*a1e26a70SApple OSS Distributions * arg3 = unused
3806*a1e26a70SApple OSS Distributions */
3807*a1e26a70SApple OSS Distributions thread_t th = current_thread();
3808*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
3809*a1e26a70SApple OSS Distributions if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
3810*a1e26a70SApple OSS Distributions (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
3811*a1e26a70SApple OSS Distributions error = EINVAL;
3812*a1e26a70SApple OSS Distributions break;
3813*a1e26a70SApple OSS Distributions }
3814*a1e26a70SApple OSS Distributions
3815*a1e26a70SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(arg2);
3816*a1e26a70SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3817*a1e26a70SApple OSS Distributions error = EINVAL;
3818*a1e26a70SApple OSS Distributions break;
3819*a1e26a70SApple OSS Distributions }
3820*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3821*a1e26a70SApple OSS Distributions bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false, false);
3822*a1e26a70SApple OSS Distributions workq_unlock(wq);
3823*a1e26a70SApple OSS Distributions
3824*a1e26a70SApple OSS Distributions *retval = should_narrow;
3825*a1e26a70SApple OSS Distributions break;
3826*a1e26a70SApple OSS Distributions }
3827*a1e26a70SApple OSS Distributions case WQOPS_SETUP_DISPATCH: {
3828*a1e26a70SApple OSS Distributions /*
3829*a1e26a70SApple OSS Distributions * item = pointer to workq_dispatch_config structure
3830*a1e26a70SApple OSS Distributions * arg2 = sizeof(item)
3831*a1e26a70SApple OSS Distributions */
3832*a1e26a70SApple OSS Distributions struct workq_dispatch_config cfg;
3833*a1e26a70SApple OSS Distributions bzero(&cfg, sizeof(cfg));
3834*a1e26a70SApple OSS Distributions
3835*a1e26a70SApple OSS Distributions error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
3836*a1e26a70SApple OSS Distributions if (error) {
3837*a1e26a70SApple OSS Distributions break;
3838*a1e26a70SApple OSS Distributions }
3839*a1e26a70SApple OSS Distributions
3840*a1e26a70SApple OSS Distributions if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
3841*a1e26a70SApple OSS Distributions cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
3842*a1e26a70SApple OSS Distributions error = ENOTSUP;
3843*a1e26a70SApple OSS Distributions break;
3844*a1e26a70SApple OSS Distributions }
3845*a1e26a70SApple OSS Distributions
3846*a1e26a70SApple OSS Distributions /* Load fields from version 1 */
3847*a1e26a70SApple OSS Distributions p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;
3848*a1e26a70SApple OSS Distributions
3849*a1e26a70SApple OSS Distributions /* Load fields from version 2 */
3850*a1e26a70SApple OSS Distributions if (cfg.wdc_version >= 2) {
3851*a1e26a70SApple OSS Distributions p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
3852*a1e26a70SApple OSS Distributions }
3853*a1e26a70SApple OSS Distributions
3854*a1e26a70SApple OSS Distributions break;
3855*a1e26a70SApple OSS Distributions }
3856*a1e26a70SApple OSS Distributions default:
3857*a1e26a70SApple OSS Distributions error = EINVAL;
3858*a1e26a70SApple OSS Distributions break;
3859*a1e26a70SApple OSS Distributions }
3860*a1e26a70SApple OSS Distributions
3861*a1e26a70SApple OSS Distributions return error;
3862*a1e26a70SApple OSS Distributions }
3863*a1e26a70SApple OSS Distributions
3864*a1e26a70SApple OSS Distributions /*
3865*a1e26a70SApple OSS Distributions * We have no work to do, park ourselves on the idle list.
3866*a1e26a70SApple OSS Distributions *
3867*a1e26a70SApple OSS Distributions * Consumes the workqueue lock and does not return.
3868*a1e26a70SApple OSS Distributions */
3869*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
3870*a1e26a70SApple OSS Distributions static void
workq_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)3871*a1e26a70SApple OSS Distributions workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
3872*a1e26a70SApple OSS Distributions uint32_t setup_flags)
3873*a1e26a70SApple OSS Distributions {
3874*a1e26a70SApple OSS Distributions assert(uth == current_uthread());
3875*a1e26a70SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3876*a1e26a70SApple OSS Distributions workq_push_idle_thread(p, wq, uth, setup_flags); // may not return
3877*a1e26a70SApple OSS Distributions
3878*a1e26a70SApple OSS Distributions workq_thread_reset_cpupercent(NULL, uth);
3879*a1e26a70SApple OSS Distributions
3880*a1e26a70SApple OSS Distributions #if CONFIG_PREADOPT_TG
3881*a1e26a70SApple OSS Distributions /* Clear the preadoption thread group on the thread.
3882*a1e26a70SApple OSS Distributions *
3883*a1e26a70SApple OSS Distributions * Case 1:
3884*a1e26a70SApple OSS Distributions * Creator thread which never picked up a thread request. We set a
3885*a1e26a70SApple OSS Distributions * preadoption thread group on creator threads but if it never picked
3886*a1e26a70SApple OSS Distributions * up a thread request and didn't go to userspace, then the thread will
3887*a1e26a70SApple OSS Distributions * park with a preadoption thread group but no explicitly adopted
3888*a1e26a70SApple OSS Distributions * voucher or work interval.
3889*a1e26a70SApple OSS Distributions *
3890*a1e26a70SApple OSS Distributions * We drop the preadoption thread group here before proceeding to park.
3891*a1e26a70SApple OSS Distributions * Note - we may get preempted when we drop the workq lock below.
3892*a1e26a70SApple OSS Distributions *
3893*a1e26a70SApple OSS Distributions * Case 2:
3894*a1e26a70SApple OSS Distributions * Thread picked up a thread request and bound to it and returned back
3895*a1e26a70SApple OSS Distributions * from userspace and is parking. At this point, preadoption thread
3896*a1e26a70SApple OSS Distributions * group should be NULL since the thread has unbound from the thread
3897*a1e26a70SApple OSS Distributions * request. So this operation should be a no-op.
3898*a1e26a70SApple OSS Distributions */
3899*a1e26a70SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
3900*a1e26a70SApple OSS Distributions #endif
3901*a1e26a70SApple OSS Distributions
3902*a1e26a70SApple OSS Distributions if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
3903*a1e26a70SApple OSS Distributions !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
3904*a1e26a70SApple OSS Distributions workq_unlock(wq);
3905*a1e26a70SApple OSS Distributions
3906*a1e26a70SApple OSS Distributions /*
3907*a1e26a70SApple OSS Distributions * workq_push_idle_thread() will unset `has_stack`
3908*a1e26a70SApple OSS Distributions * if it wants us to free the stack before parking.
3909*a1e26a70SApple OSS Distributions */
3910*a1e26a70SApple OSS Distributions if (!uth->uu_save.uus_workq_park_data.has_stack) {
3911*a1e26a70SApple OSS Distributions pthread_functions->workq_markfree_threadstack(p,
3912*a1e26a70SApple OSS Distributions get_machthread(uth), get_task_map(proc_task(p)),
3913*a1e26a70SApple OSS Distributions uth->uu_workq_stackaddr);
3914*a1e26a70SApple OSS Distributions }
3915*a1e26a70SApple OSS Distributions
3916*a1e26a70SApple OSS Distributions /*
3917*a1e26a70SApple OSS Distributions * When we remove the voucher from the thread, we may lose our importance
3918*a1e26a70SApple OSS Distributions * causing us to get preempted, so we do this after putting the thread on
3919*a1e26a70SApple OSS Distributions * the idle list. Then, when we get our importance back we'll be able to
3920*a1e26a70SApple OSS Distributions * use this thread from e.g. the kevent call out to deliver a boosting
3921*a1e26a70SApple OSS Distributions * message.
3922*a1e26a70SApple OSS Distributions *
3923*a1e26a70SApple OSS Distributions * Note that setting the voucher to NULL will not clear the preadoption
3924*a1e26a70SApple OSS Distributions * thread since this thread could have become the creator again and
3925*a1e26a70SApple OSS Distributions * perhaps acquired a preadoption thread group.
3926*a1e26a70SApple OSS Distributions */
3927*a1e26a70SApple OSS Distributions __assert_only kern_return_t kr;
3928*a1e26a70SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3929*a1e26a70SApple OSS Distributions assert(kr == KERN_SUCCESS);
3930*a1e26a70SApple OSS Distributions
3931*a1e26a70SApple OSS Distributions workq_lock_spin(wq);
3932*a1e26a70SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
3933*a1e26a70SApple OSS Distributions setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
3934*a1e26a70SApple OSS Distributions }
3935*a1e26a70SApple OSS Distributions
3936*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
3937*a1e26a70SApple OSS Distributions
3938*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
3939*a1e26a70SApple OSS Distributions /*
3940*a1e26a70SApple OSS Distributions * While we'd dropped the lock to unset our voucher, someone came
3941*a1e26a70SApple OSS Distributions * around and made us runnable. But because we weren't waiting on the
3942*a1e26a70SApple OSS Distributions * event their thread_wakeup() was ineffectual. To correct for that,
3943*a1e26a70SApple OSS Distributions * we just run the continuation ourselves.
3944*a1e26a70SApple OSS Distributions */
3945*a1e26a70SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
3946*a1e26a70SApple OSS Distributions __builtin_unreachable();
3947*a1e26a70SApple OSS Distributions }
3948*a1e26a70SApple OSS Distributions
3949*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3950*a1e26a70SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth,
3951*a1e26a70SApple OSS Distributions WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
3952*a1e26a70SApple OSS Distributions __builtin_unreachable();
3953*a1e26a70SApple OSS Distributions }
3954*a1e26a70SApple OSS Distributions
3955*a1e26a70SApple OSS Distributions /* Disarm the workqueue quantum since the thread is now idle */
3956*a1e26a70SApple OSS Distributions thread_disarm_workqueue_quantum(get_machthread(uth));
3957*a1e26a70SApple OSS Distributions
3958*a1e26a70SApple OSS Distributions thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
3959*a1e26a70SApple OSS Distributions assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
3960*a1e26a70SApple OSS Distributions workq_unlock(wq);
3961*a1e26a70SApple OSS Distributions thread_block(workq_unpark_continue);
3962*a1e26a70SApple OSS Distributions __builtin_unreachable();
3963*a1e26a70SApple OSS Distributions }
3964*a1e26a70SApple OSS Distributions
3965*a1e26a70SApple OSS Distributions static inline bool
workq_may_start_event_mgr_thread(struct workqueue * wq,struct uthread * uth)3966*a1e26a70SApple OSS Distributions workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
3967*a1e26a70SApple OSS Distributions {
3968*a1e26a70SApple OSS Distributions /*
3969*a1e26a70SApple OSS Distributions * There's an event manager request and either:
3970*a1e26a70SApple OSS Distributions * - no event manager currently running
3971*a1e26a70SApple OSS Distributions * - we are re-using the event manager
3972*a1e26a70SApple OSS Distributions */
3973*a1e26a70SApple OSS Distributions return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
3974*a1e26a70SApple OSS Distributions (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
3975*a1e26a70SApple OSS Distributions }
3976*a1e26a70SApple OSS Distributions
3977*a1e26a70SApple OSS Distributions /* Called with workq lock held. */
3978*a1e26a70SApple OSS Distributions static uint32_t
workq_constrained_allowance(struct workqueue * wq,thread_qos_t at_qos,struct uthread * uth,bool may_start_timer,bool record_failed_allowance)3979*a1e26a70SApple OSS Distributions workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
3980*a1e26a70SApple OSS Distributions struct uthread *uth, bool may_start_timer, bool record_failed_allowance)
3981*a1e26a70SApple OSS Distributions {
3982*a1e26a70SApple OSS Distributions assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
3983*a1e26a70SApple OSS Distributions uint32_t allowance_passed = 0;
3984*a1e26a70SApple OSS Distributions uint32_t count = 0;
3985*a1e26a70SApple OSS Distributions
3986*a1e26a70SApple OSS Distributions uint32_t max_count = wq->wq_constrained_threads_scheduled;
3987*a1e26a70SApple OSS Distributions if (uth && workq_thread_is_nonovercommit(uth)) {
3988*a1e26a70SApple OSS Distributions /*
3989*a1e26a70SApple OSS Distributions * don't count the current thread as scheduled
3990*a1e26a70SApple OSS Distributions */
3991*a1e26a70SApple OSS Distributions assert(max_count > 0);
3992*a1e26a70SApple OSS Distributions max_count--;
3993*a1e26a70SApple OSS Distributions }
3994*a1e26a70SApple OSS Distributions if (max_count >= wq_max_constrained_threads) {
3995*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
3996*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled,
3997*a1e26a70SApple OSS Distributions wq_max_constrained_threads);
3998*a1e26a70SApple OSS Distributions /*
3999*a1e26a70SApple OSS Distributions * we need 1 or more constrained threads to return to the kernel before
4000*a1e26a70SApple OSS Distributions * we can dispatch additional work
4001*a1e26a70SApple OSS Distributions */
4002*a1e26a70SApple OSS Distributions allowance_passed = 0;
4003*a1e26a70SApple OSS Distributions goto out;
4004*a1e26a70SApple OSS Distributions }
4005*a1e26a70SApple OSS Distributions max_count -= wq_max_constrained_threads;
4006*a1e26a70SApple OSS Distributions
4007*a1e26a70SApple OSS Distributions /*
4008*a1e26a70SApple OSS Distributions * Compute a metric for many how many threads are active. We find the
4009*a1e26a70SApple OSS Distributions * highest priority request outstanding and then add up the number of active
4010*a1e26a70SApple OSS Distributions * threads in that and all higher-priority buckets. We'll also add any
4011*a1e26a70SApple OSS Distributions * "busy" threads which are not currently active but blocked recently enough
4012*a1e26a70SApple OSS Distributions * that we can't be sure that they won't be unblocked soon and start
4013*a1e26a70SApple OSS Distributions * being active again.
4014*a1e26a70SApple OSS Distributions *
4015*a1e26a70SApple OSS Distributions * We'll then compare this metric to our max concurrency to decide whether
4016*a1e26a70SApple OSS Distributions * to add a new thread.
4017*a1e26a70SApple OSS Distributions */
4018*a1e26a70SApple OSS Distributions
4019*a1e26a70SApple OSS Distributions uint32_t busycount, thactive_count;
4020*a1e26a70SApple OSS Distributions
4021*a1e26a70SApple OSS Distributions thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
4022*a1e26a70SApple OSS Distributions at_qos, &busycount, NULL);
4023*a1e26a70SApple OSS Distributions
4024*a1e26a70SApple OSS Distributions if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
4025*a1e26a70SApple OSS Distributions at_qos <= uth->uu_workq_pri.qos_bucket) {
4026*a1e26a70SApple OSS Distributions /*
4027*a1e26a70SApple OSS Distributions * Don't count this thread as currently active, but only if it's not
4028*a1e26a70SApple OSS Distributions * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
4029*a1e26a70SApple OSS Distributions * managers.
4030*a1e26a70SApple OSS Distributions */
4031*a1e26a70SApple OSS Distributions assert(thactive_count > 0);
4032*a1e26a70SApple OSS Distributions thactive_count--;
4033*a1e26a70SApple OSS Distributions }
4034*a1e26a70SApple OSS Distributions
4035*a1e26a70SApple OSS Distributions count = wq_max_parallelism[_wq_bucket(at_qos)];
4036*a1e26a70SApple OSS Distributions if (count > thactive_count + busycount) {
4037*a1e26a70SApple OSS Distributions count -= thactive_count + busycount;
4038*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
4039*a1e26a70SApple OSS Distributions thactive_count, busycount);
4040*a1e26a70SApple OSS Distributions allowance_passed = MIN(count, max_count);
4041*a1e26a70SApple OSS Distributions goto out;
4042*a1e26a70SApple OSS Distributions } else {
4043*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
4044*a1e26a70SApple OSS Distributions thactive_count, busycount);
4045*a1e26a70SApple OSS Distributions allowance_passed = 0;
4046*a1e26a70SApple OSS Distributions }
4047*a1e26a70SApple OSS Distributions
4048*a1e26a70SApple OSS Distributions if (may_start_timer) {
4049*a1e26a70SApple OSS Distributions /*
4050*a1e26a70SApple OSS Distributions * If this is called from the add timer, we won't have another timer
4051*a1e26a70SApple OSS Distributions * fire when the thread exits the "busy" state, so rearm the timer.
4052*a1e26a70SApple OSS Distributions */
4053*a1e26a70SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
4054*a1e26a70SApple OSS Distributions }
4055*a1e26a70SApple OSS Distributions
4056*a1e26a70SApple OSS Distributions out:
4057*a1e26a70SApple OSS Distributions if (record_failed_allowance) {
4058*a1e26a70SApple OSS Distributions wq->wq_exceeded_active_constrained_thread_limit = !allowance_passed;
4059*a1e26a70SApple OSS Distributions }
4060*a1e26a70SApple OSS Distributions return allowance_passed;
4061*a1e26a70SApple OSS Distributions }
4062*a1e26a70SApple OSS Distributions
4063*a1e26a70SApple OSS Distributions static bool
workq_threadreq_admissible(struct workqueue * wq,struct uthread * uth,workq_threadreq_t req)4064*a1e26a70SApple OSS Distributions workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
4065*a1e26a70SApple OSS Distributions workq_threadreq_t req)
4066*a1e26a70SApple OSS Distributions {
4067*a1e26a70SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
4068*a1e26a70SApple OSS Distributions return workq_may_start_event_mgr_thread(wq, uth);
4069*a1e26a70SApple OSS Distributions }
4070*a1e26a70SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
4071*a1e26a70SApple OSS Distributions return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
4072*a1e26a70SApple OSS Distributions }
4073*a1e26a70SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
4074*a1e26a70SApple OSS Distributions return workq_constrained_allowance(wq, req->tr_qos, uth, true, true);
4075*a1e26a70SApple OSS Distributions }
4076*a1e26a70SApple OSS Distributions
4077*a1e26a70SApple OSS Distributions return true;
4078*a1e26a70SApple OSS Distributions }
4079*a1e26a70SApple OSS Distributions
4080*a1e26a70SApple OSS Distributions /*
4081*a1e26a70SApple OSS Distributions * Called from the context of selecting thread requests for threads returning
4082*a1e26a70SApple OSS Distributions * from userspace or creator thread
4083*a1e26a70SApple OSS Distributions */
4084*a1e26a70SApple OSS Distributions static workq_threadreq_t
workq_cooperative_queue_best_req(struct workqueue * wq,struct uthread * uth)4085*a1e26a70SApple OSS Distributions workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
4086*a1e26a70SApple OSS Distributions {
4087*a1e26a70SApple OSS Distributions workq_lock_held(wq);
4088*a1e26a70SApple OSS Distributions
4089*a1e26a70SApple OSS Distributions /*
4090*a1e26a70SApple OSS Distributions * If the current thread is cooperative, we need to exclude it as part of
4091*a1e26a70SApple OSS Distributions * cooperative schedule count since this thread is looking for a new
4092*a1e26a70SApple OSS Distributions * request. Change in the schedule count for cooperative pool therefore
4093*a1e26a70SApple OSS Distributions * requires us to reeevaluate the next best request for it.
4094*a1e26a70SApple OSS Distributions */
4095*a1e26a70SApple OSS Distributions if (uth && workq_thread_is_cooperative(uth)) {
4096*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
4097*a1e26a70SApple OSS Distributions
4098*a1e26a70SApple OSS Distributions (void) _wq_cooperative_queue_refresh_best_req_qos(wq);
4099*a1e26a70SApple OSS Distributions
4100*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
4101*a1e26a70SApple OSS Distributions } else {
4102*a1e26a70SApple OSS Distributions /*
4103*a1e26a70SApple OSS Distributions * The old value that was already precomputed should be safe to use -
4104*a1e26a70SApple OSS Distributions * add an assert that asserts that the best req QoS doesn't change in
4105*a1e26a70SApple OSS Distributions * this case
4106*a1e26a70SApple OSS Distributions */
4107*a1e26a70SApple OSS Distributions assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
4108*a1e26a70SApple OSS Distributions }
4109*a1e26a70SApple OSS Distributions
4110*a1e26a70SApple OSS Distributions thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;
4111*a1e26a70SApple OSS Distributions
4112*a1e26a70SApple OSS Distributions /* There are no eligible requests in the cooperative pool */
4113*a1e26a70SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
4114*a1e26a70SApple OSS Distributions return NULL;
4115*a1e26a70SApple OSS Distributions }
4116*a1e26a70SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
4117*a1e26a70SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_MANAGER);
4118*a1e26a70SApple OSS Distributions
4119*a1e26a70SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
4120*a1e26a70SApple OSS Distributions assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));
4121*a1e26a70SApple OSS Distributions
4122*a1e26a70SApple OSS Distributions return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
4123*a1e26a70SApple OSS Distributions }
4124*a1e26a70SApple OSS Distributions
4125*a1e26a70SApple OSS Distributions static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue * wq)4126*a1e26a70SApple OSS Distributions workq_threadreq_select_for_creator(struct workqueue *wq)
4127*a1e26a70SApple OSS Distributions {
4128*a1e26a70SApple OSS Distributions workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
4129*a1e26a70SApple OSS Distributions thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
4130*a1e26a70SApple OSS Distributions uint8_t pri = 0;
4131*a1e26a70SApple OSS Distributions
4132*a1e26a70SApple OSS Distributions /*
4133*a1e26a70SApple OSS Distributions * Compute the best priority request, and ignore the turnstile for now
4134*a1e26a70SApple OSS Distributions */
4135*a1e26a70SApple OSS Distributions
4136*a1e26a70SApple OSS Distributions req_pri = priority_queue_max(&wq->wq_special_queue,
4137*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4138*a1e26a70SApple OSS Distributions if (req_pri) {
4139*a1e26a70SApple OSS Distributions pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
4140*a1e26a70SApple OSS Distributions &req_pri->tr_entry);
4141*a1e26a70SApple OSS Distributions }
4142*a1e26a70SApple OSS Distributions
4143*a1e26a70SApple OSS Distributions /*
4144*a1e26a70SApple OSS Distributions * Handle the manager thread request. The special queue might yield
4145*a1e26a70SApple OSS Distributions * a higher priority, but the manager always beats the QoS world.
4146*a1e26a70SApple OSS Distributions */
4147*a1e26a70SApple OSS Distributions
4148*a1e26a70SApple OSS Distributions req_mgr = wq->wq_event_manager_threadreq;
4149*a1e26a70SApple OSS Distributions if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
4150*a1e26a70SApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
4151*a1e26a70SApple OSS Distributions
4152*a1e26a70SApple OSS Distributions if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
4153*a1e26a70SApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
4154*a1e26a70SApple OSS Distributions } else {
4155*a1e26a70SApple OSS Distributions mgr_pri = thread_workq_pri_for_qos(
4156*a1e26a70SApple OSS Distributions _pthread_priority_thread_qos(mgr_pri));
4157*a1e26a70SApple OSS Distributions }
4158*a1e26a70SApple OSS Distributions
4159*a1e26a70SApple OSS Distributions return mgr_pri >= pri ? req_mgr : req_pri;
4160*a1e26a70SApple OSS Distributions }
4161*a1e26a70SApple OSS Distributions
4162*a1e26a70SApple OSS Distributions /*
4163*a1e26a70SApple OSS Distributions * Compute the best QoS Request, and check whether it beats the "pri" one
4164*a1e26a70SApple OSS Distributions *
4165*a1e26a70SApple OSS Distributions * Start by comparing the overcommit and the cooperative pool
4166*a1e26a70SApple OSS Distributions */
4167*a1e26a70SApple OSS Distributions req_qos = priority_queue_max(&wq->wq_overcommit_queue,
4168*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4169*a1e26a70SApple OSS Distributions if (req_qos) {
4170*a1e26a70SApple OSS Distributions qos = req_qos->tr_qos;
4171*a1e26a70SApple OSS Distributions }
4172*a1e26a70SApple OSS Distributions
4173*a1e26a70SApple OSS Distributions req_tmp = workq_cooperative_queue_best_req(wq, NULL);
4174*a1e26a70SApple OSS Distributions if (req_tmp && qos <= req_tmp->tr_qos) {
4175*a1e26a70SApple OSS Distributions /*
4176*a1e26a70SApple OSS Distributions * Cooperative TR is better between overcommit and cooperative. Note
4177*a1e26a70SApple OSS Distributions * that if qos is same between overcommit and cooperative, we choose
4178*a1e26a70SApple OSS Distributions * cooperative.
4179*a1e26a70SApple OSS Distributions *
4180*a1e26a70SApple OSS Distributions * Pick cooperative pool if it passes the admissions check
4181*a1e26a70SApple OSS Distributions */
4182*a1e26a70SApple OSS Distributions if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
4183*a1e26a70SApple OSS Distributions req_qos = req_tmp;
4184*a1e26a70SApple OSS Distributions qos = req_qos->tr_qos;
4185*a1e26a70SApple OSS Distributions }
4186*a1e26a70SApple OSS Distributions }
4187*a1e26a70SApple OSS Distributions
4188*a1e26a70SApple OSS Distributions /*
4189*a1e26a70SApple OSS Distributions * Compare the best QoS so far - either from overcommit or from cooperative
4190*a1e26a70SApple OSS Distributions * pool - and compare it with the constrained pool
4191*a1e26a70SApple OSS Distributions */
4192*a1e26a70SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_constrained_queue,
4193*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4194*a1e26a70SApple OSS Distributions
4195*a1e26a70SApple OSS Distributions if (req_tmp && qos < req_tmp->tr_qos) {
4196*a1e26a70SApple OSS Distributions /*
4197*a1e26a70SApple OSS Distributions * Constrained pool is best in QoS between overcommit, cooperative
4198*a1e26a70SApple OSS Distributions * and constrained. Now check how it fairs against the priority case
4199*a1e26a70SApple OSS Distributions */
4200*a1e26a70SApple OSS Distributions if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
4201*a1e26a70SApple OSS Distributions return req_pri;
4202*a1e26a70SApple OSS Distributions }
4203*a1e26a70SApple OSS Distributions
4204*a1e26a70SApple OSS Distributions if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true, true)) {
4205*a1e26a70SApple OSS Distributions /*
4206*a1e26a70SApple OSS Distributions * If the constrained thread request is the best one and passes
4207*a1e26a70SApple OSS Distributions * the admission check, pick it.
4208*a1e26a70SApple OSS Distributions */
4209*a1e26a70SApple OSS Distributions return req_tmp;
4210*a1e26a70SApple OSS Distributions }
4211*a1e26a70SApple OSS Distributions }
4212*a1e26a70SApple OSS Distributions
4213*a1e26a70SApple OSS Distributions /*
4214*a1e26a70SApple OSS Distributions * Compare the best of the QoS world with the priority
4215*a1e26a70SApple OSS Distributions */
4216*a1e26a70SApple OSS Distributions if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
4217*a1e26a70SApple OSS Distributions return req_pri;
4218*a1e26a70SApple OSS Distributions }
4219*a1e26a70SApple OSS Distributions
4220*a1e26a70SApple OSS Distributions if (req_qos) {
4221*a1e26a70SApple OSS Distributions return req_qos;
4222*a1e26a70SApple OSS Distributions }
4223*a1e26a70SApple OSS Distributions
4224*a1e26a70SApple OSS Distributions /*
4225*a1e26a70SApple OSS Distributions * If we had no eligible request but we have a turnstile push,
4226*a1e26a70SApple OSS Distributions * it must be a non overcommit thread request that failed
4227*a1e26a70SApple OSS Distributions * the admission check.
4228*a1e26a70SApple OSS Distributions *
4229*a1e26a70SApple OSS Distributions * Just fake a BG thread request so that if the push stops the creator
4230*a1e26a70SApple OSS Distributions * priority just drops to 4.
4231*a1e26a70SApple OSS Distributions */
4232*a1e26a70SApple OSS Distributions if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
4233*a1e26a70SApple OSS Distributions static struct workq_threadreq_s workq_sync_push_fake_req = {
4234*a1e26a70SApple OSS Distributions .tr_qos = THREAD_QOS_BACKGROUND,
4235*a1e26a70SApple OSS Distributions };
4236*a1e26a70SApple OSS Distributions
4237*a1e26a70SApple OSS Distributions return &workq_sync_push_fake_req;
4238*a1e26a70SApple OSS Distributions }
4239*a1e26a70SApple OSS Distributions
4240*a1e26a70SApple OSS Distributions return NULL;
4241*a1e26a70SApple OSS Distributions }
4242*a1e26a70SApple OSS Distributions
4243*a1e26a70SApple OSS Distributions /*
4244*a1e26a70SApple OSS Distributions * Returns true if this caused a change in the schedule counts of the
4245*a1e26a70SApple OSS Distributions * cooperative pool
4246*a1e26a70SApple OSS Distributions */
4247*a1e26a70SApple OSS Distributions static bool
workq_adjust_cooperative_constrained_schedule_counts(struct workqueue * wq,struct uthread * uth,thread_qos_t old_thread_qos,workq_tr_flags_t tr_flags)4248*a1e26a70SApple OSS Distributions workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
4249*a1e26a70SApple OSS Distributions struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
4250*a1e26a70SApple OSS Distributions {
4251*a1e26a70SApple OSS Distributions workq_lock_held(wq);
4252*a1e26a70SApple OSS Distributions
4253*a1e26a70SApple OSS Distributions /*
4254*a1e26a70SApple OSS Distributions * Row: thread type
4255*a1e26a70SApple OSS Distributions * Column: Request type
4256*a1e26a70SApple OSS Distributions *
4257*a1e26a70SApple OSS Distributions * overcommit non-overcommit cooperative
4258*a1e26a70SApple OSS Distributions * overcommit X case 1 case 2
4259*a1e26a70SApple OSS Distributions * cooperative case 3 case 4 case 5
4260*a1e26a70SApple OSS Distributions * non-overcommit case 6 X case 7
4261*a1e26a70SApple OSS Distributions *
4262*a1e26a70SApple OSS Distributions * Move the thread to the right bucket depending on what state it currently
4263*a1e26a70SApple OSS Distributions * has and what state the thread req it picks, is going to have.
4264*a1e26a70SApple OSS Distributions *
4265*a1e26a70SApple OSS Distributions * Note that the creator thread is an overcommit thread.
4266*a1e26a70SApple OSS Distributions */
4267*a1e26a70SApple OSS Distributions thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_req;
4268*a1e26a70SApple OSS Distributions
4269*a1e26a70SApple OSS Distributions /*
4270*a1e26a70SApple OSS Distributions * Anytime a cooperative bucket's schedule count changes, we need to
4271*a1e26a70SApple OSS Distributions * potentially refresh the next best QoS for that pool when we determine
4272*a1e26a70SApple OSS Distributions * the next request for the creator
4273*a1e26a70SApple OSS Distributions */
4274*a1e26a70SApple OSS Distributions bool cooperative_pool_sched_count_changed = false;
4275*a1e26a70SApple OSS Distributions
4276*a1e26a70SApple OSS Distributions if (workq_thread_is_overcommit(uth)) {
4277*a1e26a70SApple OSS Distributions if (workq_tr_is_nonovercommit(tr_flags)) {
4278*a1e26a70SApple OSS Distributions // Case 1: thread is overcommit, req is non-overcommit
4279*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
4280*a1e26a70SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4281*a1e26a70SApple OSS Distributions // Case 2: thread is overcommit, req is cooperative
4282*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
4283*a1e26a70SApple OSS Distributions cooperative_pool_sched_count_changed = true;
4284*a1e26a70SApple OSS Distributions }
4285*a1e26a70SApple OSS Distributions } else if (workq_thread_is_cooperative(uth)) {
4286*a1e26a70SApple OSS Distributions if (workq_tr_is_overcommit(tr_flags)) {
4287*a1e26a70SApple OSS Distributions // Case 3: thread is cooperative, req is overcommit
4288*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
4289*a1e26a70SApple OSS Distributions } else if (workq_tr_is_nonovercommit(tr_flags)) {
4290*a1e26a70SApple OSS Distributions // Case 4: thread is cooperative, req is non-overcommit
4291*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
4292*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
4293*a1e26a70SApple OSS Distributions } else {
4294*a1e26a70SApple OSS Distributions // Case 5: thread is cooperative, req is also cooperative
4295*a1e26a70SApple OSS Distributions assert(workq_tr_is_cooperative(tr_flags));
4296*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
4297*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
4298*a1e26a70SApple OSS Distributions }
4299*a1e26a70SApple OSS Distributions cooperative_pool_sched_count_changed = true;
4300*a1e26a70SApple OSS Distributions } else {
4301*a1e26a70SApple OSS Distributions if (workq_tr_is_overcommit(tr_flags)) {
4302*a1e26a70SApple OSS Distributions // Case 6: Thread is non-overcommit, req is overcommit
4303*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled--;
4304*a1e26a70SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4305*a1e26a70SApple OSS Distributions // Case 7: Thread is non-overcommit, req is cooperative
4306*a1e26a70SApple OSS Distributions wq->wq_constrained_threads_scheduled--;
4307*a1e26a70SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
4308*a1e26a70SApple OSS Distributions cooperative_pool_sched_count_changed = true;
4309*a1e26a70SApple OSS Distributions }
4310*a1e26a70SApple OSS Distributions }
4311*a1e26a70SApple OSS Distributions
4312*a1e26a70SApple OSS Distributions return cooperative_pool_sched_count_changed;
4313*a1e26a70SApple OSS Distributions }
4314*a1e26a70SApple OSS Distributions
4315*a1e26a70SApple OSS Distributions static workq_threadreq_t
workq_threadreq_select(struct workqueue * wq,struct uthread * uth)4316*a1e26a70SApple OSS Distributions workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
4317*a1e26a70SApple OSS Distributions {
4318*a1e26a70SApple OSS Distributions workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
4319*a1e26a70SApple OSS Distributions uintptr_t proprietor;
4320*a1e26a70SApple OSS Distributions thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
4321*a1e26a70SApple OSS Distributions uint8_t pri = 0;
4322*a1e26a70SApple OSS Distributions
4323*a1e26a70SApple OSS Distributions if (uth == wq->wq_creator) {
4324*a1e26a70SApple OSS Distributions uth = NULL;
4325*a1e26a70SApple OSS Distributions }
4326*a1e26a70SApple OSS Distributions
4327*a1e26a70SApple OSS Distributions /*
4328*a1e26a70SApple OSS Distributions * Compute the best priority request (special or turnstile)
4329*a1e26a70SApple OSS Distributions */
4330*a1e26a70SApple OSS Distributions
4331*a1e26a70SApple OSS Distributions pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
4332*a1e26a70SApple OSS Distributions &proprietor);
4333*a1e26a70SApple OSS Distributions if (pri) {
4334*a1e26a70SApple OSS Distributions struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
4335*a1e26a70SApple OSS Distributions req_pri = &kqwl->kqwl_request;
4336*a1e26a70SApple OSS Distributions if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
4337*a1e26a70SApple OSS Distributions panic("Invalid thread request (%p) state %d",
4338*a1e26a70SApple OSS Distributions req_pri, req_pri->tr_state);
4339*a1e26a70SApple OSS Distributions }
4340*a1e26a70SApple OSS Distributions } else {
4341*a1e26a70SApple OSS Distributions req_pri = NULL;
4342*a1e26a70SApple OSS Distributions }
4343*a1e26a70SApple OSS Distributions
4344*a1e26a70SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_special_queue,
4345*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4346*a1e26a70SApple OSS Distributions if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
4347*a1e26a70SApple OSS Distributions &req_tmp->tr_entry)) {
4348*a1e26a70SApple OSS Distributions req_pri = req_tmp;
4349*a1e26a70SApple OSS Distributions pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
4350*a1e26a70SApple OSS Distributions &req_tmp->tr_entry);
4351*a1e26a70SApple OSS Distributions }
4352*a1e26a70SApple OSS Distributions
4353*a1e26a70SApple OSS Distributions /*
4354*a1e26a70SApple OSS Distributions * Handle the manager thread request. The special queue might yield
4355*a1e26a70SApple OSS Distributions * a higher priority, but the manager always beats the QoS world.
4356*a1e26a70SApple OSS Distributions */
4357*a1e26a70SApple OSS Distributions
4358*a1e26a70SApple OSS Distributions req_mgr = wq->wq_event_manager_threadreq;
4359*a1e26a70SApple OSS Distributions if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
4360*a1e26a70SApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
4361*a1e26a70SApple OSS Distributions
4362*a1e26a70SApple OSS Distributions if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
4363*a1e26a70SApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
4364*a1e26a70SApple OSS Distributions } else {
4365*a1e26a70SApple OSS Distributions mgr_pri = thread_workq_pri_for_qos(
4366*a1e26a70SApple OSS Distributions _pthread_priority_thread_qos(mgr_pri));
4367*a1e26a70SApple OSS Distributions }
4368*a1e26a70SApple OSS Distributions
4369*a1e26a70SApple OSS Distributions return mgr_pri >= pri ? req_mgr : req_pri;
4370*a1e26a70SApple OSS Distributions }
4371*a1e26a70SApple OSS Distributions
4372*a1e26a70SApple OSS Distributions /*
4373*a1e26a70SApple OSS Distributions * Compute the best QoS Request, and check whether it beats the "pri" one
4374*a1e26a70SApple OSS Distributions */
4375*a1e26a70SApple OSS Distributions
4376*a1e26a70SApple OSS Distributions req_qos = priority_queue_max(&wq->wq_overcommit_queue,
4377*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4378*a1e26a70SApple OSS Distributions if (req_qos) {
4379*a1e26a70SApple OSS Distributions qos = req_qos->tr_qos;
4380*a1e26a70SApple OSS Distributions }
4381*a1e26a70SApple OSS Distributions
4382*a1e26a70SApple OSS Distributions req_tmp = workq_cooperative_queue_best_req(wq, uth);
4383*a1e26a70SApple OSS Distributions if (req_tmp && qos <= req_tmp->tr_qos) {
4384*a1e26a70SApple OSS Distributions /*
4385*a1e26a70SApple OSS Distributions * Cooperative TR is better between overcommit and cooperative. Note
4386*a1e26a70SApple OSS Distributions * that if qos is same between overcommit and cooperative, we choose
4387*a1e26a70SApple OSS Distributions * cooperative.
4388*a1e26a70SApple OSS Distributions *
4389*a1e26a70SApple OSS Distributions * Pick cooperative pool if it passes the admissions check
4390*a1e26a70SApple OSS Distributions */
4391*a1e26a70SApple OSS Distributions if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
4392*a1e26a70SApple OSS Distributions req_qos = req_tmp;
4393*a1e26a70SApple OSS Distributions qos = req_qos->tr_qos;
4394*a1e26a70SApple OSS Distributions }
4395*a1e26a70SApple OSS Distributions }
4396*a1e26a70SApple OSS Distributions
4397*a1e26a70SApple OSS Distributions /*
4398*a1e26a70SApple OSS Distributions * Compare the best QoS so far - either from overcommit or from cooperative
4399*a1e26a70SApple OSS Distributions * pool - and compare it with the constrained pool
4400*a1e26a70SApple OSS Distributions */
4401*a1e26a70SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_constrained_queue,
4402*a1e26a70SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4403*a1e26a70SApple OSS Distributions
4404*a1e26a70SApple OSS Distributions if (req_tmp && qos < req_tmp->tr_qos) {
4405*a1e26a70SApple OSS Distributions /*
4406*a1e26a70SApple OSS Distributions * Constrained pool is best in QoS between overcommit, cooperative
4407*a1e26a70SApple OSS Distributions * and constrained. Now check how it fairs against the priority case
4408*a1e26a70SApple OSS Distributions */
4409*a1e26a70SApple OSS Distributions if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
4410*a1e26a70SApple OSS Distributions return req_pri;
4411*a1e26a70SApple OSS Distributions }
4412*a1e26a70SApple OSS Distributions
4413*a1e26a70SApple OSS Distributions if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true, true)) {
4414*a1e26a70SApple OSS Distributions /*
4415*a1e26a70SApple OSS Distributions * If the constrained thread request is the best one and passes
4416*a1e26a70SApple OSS Distributions * the admission check, pick it.
4417*a1e26a70SApple OSS Distributions */
4418*a1e26a70SApple OSS Distributions return req_tmp;
4419*a1e26a70SApple OSS Distributions }
4420*a1e26a70SApple OSS Distributions }
4421*a1e26a70SApple OSS Distributions
4422*a1e26a70SApple OSS Distributions if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
4423*a1e26a70SApple OSS Distributions return req_pri;
4424*a1e26a70SApple OSS Distributions }
4425*a1e26a70SApple OSS Distributions
4426*a1e26a70SApple OSS Distributions return req_qos;
4427*a1e26a70SApple OSS Distributions }
4428*a1e26a70SApple OSS Distributions
4429*a1e26a70SApple OSS Distributions /*
4430*a1e26a70SApple OSS Distributions * The creator is an anonymous thread that is counted as scheduled,
4431*a1e26a70SApple OSS Distributions * but otherwise without its scheduler callback set or tracked as active
4432*a1e26a70SApple OSS Distributions * that is used to make other threads.
4433*a1e26a70SApple OSS Distributions *
4434*a1e26a70SApple OSS Distributions * When more requests are added or an existing one is hurried along,
4435*a1e26a70SApple OSS Distributions * a creator is elected and setup, or the existing one overridden accordingly.
4436*a1e26a70SApple OSS Distributions *
4437*a1e26a70SApple OSS Distributions * While this creator is in flight, because no request has been dequeued,
4438*a1e26a70SApple OSS Distributions * already running threads have a chance at stealing thread requests avoiding
4439*a1e26a70SApple OSS Distributions * useless context switches, and the creator once scheduled may not find any
4440*a1e26a70SApple OSS Distributions * work to do and will then just park again.
4441*a1e26a70SApple OSS Distributions *
4442*a1e26a70SApple OSS Distributions * The creator serves the dual purpose of informing the scheduler of work that
4443*a1e26a70SApple OSS Distributions * hasn't be materialized as threads yet, and also as a natural pacing mechanism
4444*a1e26a70SApple OSS Distributions * for thread creation.
4445*a1e26a70SApple OSS Distributions *
4446*a1e26a70SApple OSS Distributions * By being anonymous (and not bound to anything) it means that thread requests
4447*a1e26a70SApple OSS Distributions * can be stolen from this creator by threads already on core yielding more
4448*a1e26a70SApple OSS Distributions * efficient scheduling and reduced context switches.
4449*a1e26a70SApple OSS Distributions */
4450*a1e26a70SApple OSS Distributions static void
workq_schedule_creator(proc_t p,struct workqueue * wq,workq_kern_threadreq_flags_t flags)4451*a1e26a70SApple OSS Distributions workq_schedule_creator(proc_t p, struct workqueue *wq,
4452*a1e26a70SApple OSS Distributions workq_kern_threadreq_flags_t flags)
4453*a1e26a70SApple OSS Distributions {
4454*a1e26a70SApple OSS Distributions workq_threadreq_t req;
4455*a1e26a70SApple OSS Distributions struct uthread *uth;
4456*a1e26a70SApple OSS Distributions bool needs_wakeup;
4457*a1e26a70SApple OSS Distributions
4458*a1e26a70SApple OSS Distributions workq_lock_held(wq);
4459*a1e26a70SApple OSS Distributions assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);
4460*a1e26a70SApple OSS Distributions
4461*a1e26a70SApple OSS Distributions again:
4462*a1e26a70SApple OSS Distributions uth = wq->wq_creator;
4463*a1e26a70SApple OSS Distributions
4464*a1e26a70SApple OSS Distributions if (!wq->wq_reqcount) {
4465*a1e26a70SApple OSS Distributions /*
4466*a1e26a70SApple OSS Distributions * There is no thread request left.
4467*a1e26a70SApple OSS Distributions *
4468*a1e26a70SApple OSS Distributions * If there is a creator, leave everything in place, so that it cleans
4469*a1e26a70SApple OSS Distributions * up itself in workq_push_idle_thread().
4470*a1e26a70SApple OSS Distributions *
4471*a1e26a70SApple OSS Distributions * Else, make sure the turnstile state is reset to no inheritor.
4472*a1e26a70SApple OSS Distributions */
4473*a1e26a70SApple OSS Distributions if (uth == NULL) {
4474*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
4475*a1e26a70SApple OSS Distributions }
4476*a1e26a70SApple OSS Distributions return;
4477*a1e26a70SApple OSS Distributions }
4478*a1e26a70SApple OSS Distributions
4479*a1e26a70SApple OSS Distributions req = workq_threadreq_select_for_creator(wq);
4480*a1e26a70SApple OSS Distributions if (req == NULL) {
4481*a1e26a70SApple OSS Distributions /*
4482*a1e26a70SApple OSS Distributions * There isn't a thread request that passes the admission check.
4483*a1e26a70SApple OSS Distributions *
4484*a1e26a70SApple OSS Distributions * If there is a creator, do not touch anything, the creator will sort
4485*a1e26a70SApple OSS Distributions * it out when it runs.
4486*a1e26a70SApple OSS Distributions *
4487*a1e26a70SApple OSS Distributions * Else, set the inheritor to "WORKQ" so that the turnstile propagation
4488*a1e26a70SApple OSS Distributions * code calls us if anything changes.
4489*a1e26a70SApple OSS Distributions */
4490*a1e26a70SApple OSS Distributions if (uth == NULL) {
4491*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
4492*a1e26a70SApple OSS Distributions }
4493*a1e26a70SApple OSS Distributions return;
4494*a1e26a70SApple OSS Distributions }
4495*a1e26a70SApple OSS Distributions
4496*a1e26a70SApple OSS Distributions
4497*a1e26a70SApple OSS Distributions if (uth) {
4498*a1e26a70SApple OSS Distributions /*
4499*a1e26a70SApple OSS Distributions * We need to maybe override the creator we already have
4500*a1e26a70SApple OSS Distributions */
4501*a1e26a70SApple OSS Distributions if (workq_thread_needs_priority_change(req, uth)) {
4502*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
4503*a1e26a70SApple OSS Distributions wq, 1, uthread_tid(uth), req->tr_qos);
4504*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4505*a1e26a70SApple OSS Distributions }
4506*a1e26a70SApple OSS Distributions assert(wq->wq_inheritor == get_machthread(uth));
4507*a1e26a70SApple OSS Distributions } else if (wq->wq_thidlecount) {
4508*a1e26a70SApple OSS Distributions /*
4509*a1e26a70SApple OSS Distributions * We need to unpark a creator thread
4510*a1e26a70SApple OSS Distributions */
4511*a1e26a70SApple OSS Distributions wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
4512*a1e26a70SApple OSS Distributions &needs_wakeup);
4513*a1e26a70SApple OSS Distributions /* Always reset the priorities on the newly chosen creator */
4514*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4515*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, get_machthread(uth),
4516*a1e26a70SApple OSS Distributions TURNSTILE_INHERITOR_THREAD);
4517*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
4518*a1e26a70SApple OSS Distributions wq, 2, uthread_tid(uth), req->tr_qos);
4519*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
4520*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields = 0;
4521*a1e26a70SApple OSS Distributions if (needs_wakeup) {
4522*a1e26a70SApple OSS Distributions workq_thread_wakeup(uth);
4523*a1e26a70SApple OSS Distributions }
4524*a1e26a70SApple OSS Distributions } else {
4525*a1e26a70SApple OSS Distributions /*
4526*a1e26a70SApple OSS Distributions * We need to allocate a thread...
4527*a1e26a70SApple OSS Distributions */
4528*a1e26a70SApple OSS Distributions if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
4529*a1e26a70SApple OSS Distributions /* out of threads, just go away */
4530*a1e26a70SApple OSS Distributions flags = WORKQ_THREADREQ_NONE;
4531*a1e26a70SApple OSS Distributions } else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
4532*a1e26a70SApple OSS Distributions act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
4533*a1e26a70SApple OSS Distributions } else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
4534*a1e26a70SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
4535*a1e26a70SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
4536*a1e26a70SApple OSS Distributions } else if ((workq_add_new_idle_thread(p, wq,
4537*a1e26a70SApple OSS Distributions workq_unpark_continue, false, NULL) == KERN_SUCCESS)) {
4538*a1e26a70SApple OSS Distributions goto again;
4539*a1e26a70SApple OSS Distributions } else {
4540*a1e26a70SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
4541*a1e26a70SApple OSS Distributions }
4542*a1e26a70SApple OSS Distributions
4543*a1e26a70SApple OSS Distributions /*
4544*a1e26a70SApple OSS Distributions * If the current thread is the inheritor:
4545*a1e26a70SApple OSS Distributions *
4546*a1e26a70SApple OSS Distributions * If we set the AST, then the thread will stay the inheritor until
4547*a1e26a70SApple OSS Distributions * either the AST calls workq_kern_threadreq_redrive(), or it parks
4548*a1e26a70SApple OSS Distributions * and calls workq_push_idle_thread().
4549*a1e26a70SApple OSS Distributions *
4550*a1e26a70SApple OSS Distributions * Else, the responsibility of the thread creation is with a thread-call
4551*a1e26a70SApple OSS Distributions * and we need to clear the inheritor.
4552*a1e26a70SApple OSS Distributions */
4553*a1e26a70SApple OSS Distributions if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
4554*a1e26a70SApple OSS Distributions wq->wq_inheritor == current_thread()) {
4555*a1e26a70SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
4556*a1e26a70SApple OSS Distributions }
4557*a1e26a70SApple OSS Distributions }
4558*a1e26a70SApple OSS Distributions }
4559*a1e26a70SApple OSS Distributions
4560*a1e26a70SApple OSS Distributions /**
4561*a1e26a70SApple OSS Distributions * Same as workq_unpark_select_threadreq_or_park_and_unlock,
4562*a1e26a70SApple OSS Distributions * but do not allow early binds.
4563*a1e26a70SApple OSS Distributions *
4564*a1e26a70SApple OSS Distributions * Called with the base pri frozen, will unfreeze it.
4565*a1e26a70SApple OSS Distributions */
4566*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
4567*a1e26a70SApple OSS Distributions static void
workq_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4568*a1e26a70SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4569*a1e26a70SApple OSS Distributions struct uthread *uth, uint32_t setup_flags)
4570*a1e26a70SApple OSS Distributions {
4571*a1e26a70SApple OSS Distributions workq_threadreq_t req = NULL;
4572*a1e26a70SApple OSS Distributions bool is_creator = (wq->wq_creator == uth);
4573*a1e26a70SApple OSS Distributions bool schedule_creator = false;
4574*a1e26a70SApple OSS Distributions
4575*a1e26a70SApple OSS Distributions if (__improbable(_wq_exiting(wq))) {
4576*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
4577*a1e26a70SApple OSS Distributions goto park;
4578*a1e26a70SApple OSS Distributions }
4579*a1e26a70SApple OSS Distributions
4580*a1e26a70SApple OSS Distributions if (wq->wq_reqcount == 0) {
4581*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
4582*a1e26a70SApple OSS Distributions goto park;
4583*a1e26a70SApple OSS Distributions }
4584*a1e26a70SApple OSS Distributions
4585*a1e26a70SApple OSS Distributions req = workq_threadreq_select(wq, uth);
4586*a1e26a70SApple OSS Distributions if (__improbable(req == NULL)) {
4587*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
4588*a1e26a70SApple OSS Distributions goto park;
4589*a1e26a70SApple OSS Distributions }
4590*a1e26a70SApple OSS Distributions
4591*a1e26a70SApple OSS Distributions struct uu_workq_policy old_pri = uth->uu_workq_pri;
4592*a1e26a70SApple OSS Distributions uint8_t tr_flags = req->tr_flags;
4593*a1e26a70SApple OSS Distributions struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
4594*a1e26a70SApple OSS Distributions
4595*a1e26a70SApple OSS Distributions /*
4596*a1e26a70SApple OSS Distributions * Attempt to setup ourselves as the new thing to run, moving all priority
4597*a1e26a70SApple OSS Distributions * pushes to ourselves.
4598*a1e26a70SApple OSS Distributions *
4599*a1e26a70SApple OSS Distributions * If the current thread is the creator, then the fact that we are presently
4600*a1e26a70SApple OSS Distributions * running is proof that we'll do something useful, so keep going.
4601*a1e26a70SApple OSS Distributions *
4602*a1e26a70SApple OSS Distributions * For other cases, peek at the AST to know whether the scheduler wants
4603*a1e26a70SApple OSS Distributions * to preempt us, if yes, park instead, and move the thread request
4604*a1e26a70SApple OSS Distributions * turnstile back to the workqueue.
4605*a1e26a70SApple OSS Distributions */
4606*a1e26a70SApple OSS Distributions if (req_ts) {
4607*a1e26a70SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4608*a1e26a70SApple OSS Distributions turnstile_update_inheritor(req_ts, get_machthread(uth),
4609*a1e26a70SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
4610*a1e26a70SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4611*a1e26a70SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4612*a1e26a70SApple OSS Distributions });
4613*a1e26a70SApple OSS Distributions }
4614*a1e26a70SApple OSS Distributions
4615*a1e26a70SApple OSS Distributions /* accounting changes of aggregate thscheduled_count and thactive which has
4616*a1e26a70SApple OSS Distributions * to be paired with the workq_thread_reset_pri below so that we have
4617*a1e26a70SApple OSS Distributions * uth->uu_workq_pri match with thactive.
4618*a1e26a70SApple OSS Distributions *
4619*a1e26a70SApple OSS Distributions * This is undone when the thread parks */
4620*a1e26a70SApple OSS Distributions if (is_creator) {
4621*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
4622*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields);
4623*a1e26a70SApple OSS Distributions wq->wq_creator = NULL;
4624*a1e26a70SApple OSS Distributions _wq_thactive_inc(wq, req->tr_qos);
4625*a1e26a70SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
4626*a1e26a70SApple OSS Distributions } else if (old_pri.qos_bucket != req->tr_qos) {
4627*a1e26a70SApple OSS Distributions _wq_thactive_move(wq, old_pri.qos_bucket, req->tr_qos);
4628*a1e26a70SApple OSS Distributions }
4629*a1e26a70SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4630*a1e26a70SApple OSS Distributions
4631*a1e26a70SApple OSS Distributions /*
4632*a1e26a70SApple OSS Distributions * Make relevant accounting changes for pool specific counts.
4633*a1e26a70SApple OSS Distributions *
4634*a1e26a70SApple OSS Distributions * The schedule counts changing can affect what the next best request
4635*a1e26a70SApple OSS Distributions * for cooperative thread pool is if this request is dequeued.
4636*a1e26a70SApple OSS Distributions */
4637*a1e26a70SApple OSS Distributions bool cooperative_sched_count_changed =
4638*a1e26a70SApple OSS Distributions workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
4639*a1e26a70SApple OSS Distributions old_pri.qos_req, tr_flags);
4640*a1e26a70SApple OSS Distributions
4641*a1e26a70SApple OSS Distributions if (workq_tr_is_overcommit(tr_flags)) {
4642*a1e26a70SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4643*a1e26a70SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4644*a1e26a70SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
4645*a1e26a70SApple OSS Distributions } else {
4646*a1e26a70SApple OSS Distributions workq_thread_set_type(uth, 0);
4647*a1e26a70SApple OSS Distributions }
4648*a1e26a70SApple OSS Distributions
4649*a1e26a70SApple OSS Distributions if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
4650*a1e26a70SApple OSS Distributions if (req_ts) {
4651*a1e26a70SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4652*a1e26a70SApple OSS Distributions turnstile_update_inheritor(req_ts, wq->wq_turnstile,
4653*a1e26a70SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
4654*a1e26a70SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4655*a1e26a70SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4656*a1e26a70SApple OSS Distributions });
4657*a1e26a70SApple OSS Distributions }
4658*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);
4659*a1e26a70SApple OSS Distributions
4660*a1e26a70SApple OSS Distributions /*
4661*a1e26a70SApple OSS Distributions * If a cooperative thread was the one which picked up the manager
4662*a1e26a70SApple OSS Distributions * thread request, we need to reevaluate the cooperative pool before
4663*a1e26a70SApple OSS Distributions * it goes and parks.
4664*a1e26a70SApple OSS Distributions *
4665*a1e26a70SApple OSS Distributions * For every other of thread request that it picks up, the logic in
4666*a1e26a70SApple OSS Distributions * workq_threadreq_select should have done this refresh.
4667*a1e26a70SApple OSS Distributions * See workq_push_idle_thread.
4668*a1e26a70SApple OSS Distributions */
4669*a1e26a70SApple OSS Distributions if (cooperative_sched_count_changed) {
4670*a1e26a70SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
4671*a1e26a70SApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(wq);
4672*a1e26a70SApple OSS Distributions }
4673*a1e26a70SApple OSS Distributions }
4674*a1e26a70SApple OSS Distributions goto park_thawed;
4675*a1e26a70SApple OSS Distributions }
4676*a1e26a70SApple OSS Distributions
4677*a1e26a70SApple OSS Distributions /*
4678*a1e26a70SApple OSS Distributions * We passed all checks, dequeue the request, bind to it, and set it up
4679*a1e26a70SApple OSS Distributions * to return to user.
4680*a1e26a70SApple OSS Distributions */
4681*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4682*a1e26a70SApple OSS Distributions workq_trace_req_id(req), tr_flags, 0);
4683*a1e26a70SApple OSS Distributions wq->wq_fulfilled++;
4684*a1e26a70SApple OSS Distributions schedule_creator = workq_threadreq_dequeue(wq, req,
4685*a1e26a70SApple OSS Distributions cooperative_sched_count_changed);
4686*a1e26a70SApple OSS Distributions
4687*a1e26a70SApple OSS Distributions workq_thread_reset_cpupercent(req, uth);
4688*a1e26a70SApple OSS Distributions
4689*a1e26a70SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4690*a1e26a70SApple OSS Distributions kqueue_threadreq_bind_prepost(p, req, uth);
4691*a1e26a70SApple OSS Distributions req = NULL;
4692*a1e26a70SApple OSS Distributions } else if (req->tr_count > 0) {
4693*a1e26a70SApple OSS Distributions req = NULL;
4694*a1e26a70SApple OSS Distributions }
4695*a1e26a70SApple OSS Distributions
4696*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4697*a1e26a70SApple OSS Distributions uth->uu_workq_flags ^= UT_WORKQ_NEW;
4698*a1e26a70SApple OSS Distributions setup_flags |= WQ_SETUP_FIRST_USE;
4699*a1e26a70SApple OSS Distributions }
4700*a1e26a70SApple OSS Distributions
4701*a1e26a70SApple OSS Distributions /* If one of the following is true, call workq_schedule_creator (which also
4702*a1e26a70SApple OSS Distributions * adjusts priority of existing creator):
4703*a1e26a70SApple OSS Distributions *
4704*a1e26a70SApple OSS Distributions * - We are the creator currently so the wq may need a new creator
4705*a1e26a70SApple OSS Distributions * - The request we're binding to is the highest priority one, existing
4706*a1e26a70SApple OSS Distributions * creator's priority might need to be adjusted to reflect the next
4707*a1e26a70SApple OSS Distributions * highest TR
4708*a1e26a70SApple OSS Distributions */
4709*a1e26a70SApple OSS Distributions if (is_creator || schedule_creator) {
4710*a1e26a70SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
4711*a1e26a70SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
4712*a1e26a70SApple OSS Distributions }
4713*a1e26a70SApple OSS Distributions
4714*a1e26a70SApple OSS Distributions workq_unlock(wq);
4715*a1e26a70SApple OSS Distributions
4716*a1e26a70SApple OSS Distributions if (req) {
4717*a1e26a70SApple OSS Distributions zfree(workq_zone_threadreq, req);
4718*a1e26a70SApple OSS Distributions }
4719*a1e26a70SApple OSS Distributions
4720*a1e26a70SApple OSS Distributions /*
4721*a1e26a70SApple OSS Distributions * Run Thread, Run!
4722*a1e26a70SApple OSS Distributions */
4723*a1e26a70SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
4724*a1e26a70SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
4725*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
4726*a1e26a70SApple OSS Distributions } else if (workq_tr_is_overcommit(tr_flags)) {
4727*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4728*a1e26a70SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4729*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
4730*a1e26a70SApple OSS Distributions }
4731*a1e26a70SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
4732*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_KEVENT;
4733*a1e26a70SApple OSS Distributions assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
4734*a1e26a70SApple OSS Distributions }
4735*a1e26a70SApple OSS Distributions
4736*a1e26a70SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
4737*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
4738*a1e26a70SApple OSS Distributions }
4739*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
4740*a1e26a70SApple OSS Distributions
4741*a1e26a70SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4742*a1e26a70SApple OSS Distributions kqueue_threadreq_bind_commit(p, get_machthread(uth));
4743*a1e26a70SApple OSS Distributions } else {
4744*a1e26a70SApple OSS Distributions #if CONFIG_PREADOPT_TG
4745*a1e26a70SApple OSS Distributions /*
4746*a1e26a70SApple OSS Distributions * The thread may have a preadopt thread group on it already because it
4747*a1e26a70SApple OSS Distributions * got tagged with it as a creator thread. So we need to make sure to
4748*a1e26a70SApple OSS Distributions * clear that since we don't have preadoption for anonymous thread
4749*a1e26a70SApple OSS Distributions * requests
4750*a1e26a70SApple OSS Distributions */
4751*a1e26a70SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
4752*a1e26a70SApple OSS Distributions #endif
4753*a1e26a70SApple OSS Distributions }
4754*a1e26a70SApple OSS Distributions
4755*a1e26a70SApple OSS Distributions workq_setup_and_run(p, uth, setup_flags);
4756*a1e26a70SApple OSS Distributions __builtin_unreachable();
4757*a1e26a70SApple OSS Distributions
4758*a1e26a70SApple OSS Distributions park:
4759*a1e26a70SApple OSS Distributions thread_unfreeze_base_pri(get_machthread(uth));
4760*a1e26a70SApple OSS Distributions park_thawed:
4761*a1e26a70SApple OSS Distributions workq_park_and_unlock(p, wq, uth, setup_flags);
4762*a1e26a70SApple OSS Distributions }
4763*a1e26a70SApple OSS Distributions
4764*a1e26a70SApple OSS Distributions /**
4765*a1e26a70SApple OSS Distributions * Runs a thread request on a thread
4766*a1e26a70SApple OSS Distributions *
4767*a1e26a70SApple OSS Distributions * - if thread is THREAD_NULL, will find a thread and run the request there.
4768*a1e26a70SApple OSS Distributions * Otherwise, the thread must be the current thread.
4769*a1e26a70SApple OSS Distributions *
4770*a1e26a70SApple OSS Distributions * - if req is NULL, will find the highest priority request and run that. If
4771*a1e26a70SApple OSS Distributions * it is not NULL, it must be a threadreq object in state NEW. If it can not
4772*a1e26a70SApple OSS Distributions * be run immediately, it will be enqueued and moved to state QUEUED.
4773*a1e26a70SApple OSS Distributions *
4774*a1e26a70SApple OSS Distributions * Either way, the thread request object serviced will be moved to state
4775*a1e26a70SApple OSS Distributions * BINDING and attached to the uthread.
4776*a1e26a70SApple OSS Distributions *
4777*a1e26a70SApple OSS Distributions * Should be called with the workqueue lock held. Will drop it.
4778*a1e26a70SApple OSS Distributions * Should be called with the base pri not frozen.
4779*a1e26a70SApple OSS Distributions */
__attribute__((noreturn, noinline))
static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
		/*
		 * The thread was bound to its request before this wakeup: skip
		 * request selection entirely and go run what we were bound to.
		 */
		if (uth->uu_workq_flags & UT_WORKQ_NEW) {
			/* First trip to userspace for this thread. */
			setup_flags |= WQ_SETUP_FIRST_USE;
		}
		uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
		/*
		 * This pointer is possibly freed and only used for tracing purposes.
		 */
		workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
		workq_unlock(wq);
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    VM_KERNEL_ADDRHIDE(req), 0, 0);
		(void)req;

		workq_setup_and_run(p, uth, setup_flags);
		__builtin_unreachable();
	}

	/*
	 * Not early-bound: freeze the base pri (the callee expects it frozen)
	 * and go pick the highest priority request — or park if there is none.
	 * workq_select_threadreq_or_park_and_unlock() drops the workq lock.
	 */
	thread_freeze_base_pri(get_machthread(uth));
	workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
}
4806*a1e26a70SApple OSS Distributions
4807*a1e26a70SApple OSS Distributions static bool
workq_creator_should_yield(struct workqueue * wq,struct uthread * uth)4808*a1e26a70SApple OSS Distributions workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
4809*a1e26a70SApple OSS Distributions {
4810*a1e26a70SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
4811*a1e26a70SApple OSS Distributions
4812*a1e26a70SApple OSS Distributions if (qos >= THREAD_QOS_USER_INTERACTIVE) {
4813*a1e26a70SApple OSS Distributions return false;
4814*a1e26a70SApple OSS Distributions }
4815*a1e26a70SApple OSS Distributions
4816*a1e26a70SApple OSS Distributions uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
4817*a1e26a70SApple OSS Distributions if (wq->wq_fulfilled == snapshot) {
4818*a1e26a70SApple OSS Distributions return false;
4819*a1e26a70SApple OSS Distributions }
4820*a1e26a70SApple OSS Distributions
4821*a1e26a70SApple OSS Distributions uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
4822*a1e26a70SApple OSS Distributions if (wq->wq_fulfilled - snapshot > conc) {
4823*a1e26a70SApple OSS Distributions /* we fulfilled more than NCPU requests since being dispatched */
4824*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
4825*a1e26a70SApple OSS Distributions wq->wq_fulfilled, snapshot);
4826*a1e26a70SApple OSS Distributions return true;
4827*a1e26a70SApple OSS Distributions }
4828*a1e26a70SApple OSS Distributions
4829*a1e26a70SApple OSS Distributions for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
4830*a1e26a70SApple OSS Distributions cnt += wq->wq_thscheduled_count[i];
4831*a1e26a70SApple OSS Distributions }
4832*a1e26a70SApple OSS Distributions if (conc <= cnt) {
4833*a1e26a70SApple OSS Distributions /* We fulfilled requests and have more than NCPU scheduled threads */
4834*a1e26a70SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
4835*a1e26a70SApple OSS Distributions wq->wq_fulfilled, snapshot);
4836*a1e26a70SApple OSS Distributions return true;
4837*a1e26a70SApple OSS Distributions }
4838*a1e26a70SApple OSS Distributions
4839*a1e26a70SApple OSS Distributions return false;
4840*a1e26a70SApple OSS Distributions }
4841*a1e26a70SApple OSS Distributions
/**
 * parked idle thread wakes up
 *
 * Continuation installed when an idle workqueue thread parks; there is no
 * preserved kernel stack, so all state is re-derived from the current thread.
 * Never returns: every path ends in a noreturn call.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	proc_t p = current_proc();
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);

	if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
		/*
		 * If the number of threads we have out are able to keep up with the
		 * demand, then we should avoid sending this creator thread to
		 * userspace.
		 */
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields++;
		workq_unlock(wq);
		/* Re-enters this same continuation after yielding. */
		thread_yield_with_continuation(workq_unpark_continue, NULL);
		__builtin_unreachable();
	}

	if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
		/* Normal path: go pick a thread request (drops the workq lock). */
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
		__builtin_unreachable();
	}

	if (__probable(wr == THREAD_AWAKENED)) {
		/*
		 * We were set running, but for the purposes of dying.
		 */
		assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
	} else {
		/*
		 * workaround for <rdar://problem/38647347>,
		 * in case we do hit userspace, make sure calling
		 * workq_thread_terminate() does the right thing here,
		 * and if we never call it, that workq_exit() will too because it sees
		 * this thread on the runlist.
		 */
		assert(wr == THREAD_INTERRUPTED);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
	}

	/* Drops the workq lock and terminates the thread. */
	workq_unpark_for_death_and_unlock(p, wq, uth,
	    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
	__builtin_unreachable();
}
4897*a1e26a70SApple OSS Distributions
/**
 * Final kernel-side setup before a workqueue thread returns to user space.
 *
 * Optionally resets the voucher, finalizes the upcall flags, lazily creates
 * the thread's pinned send right, arms or disarms the workqueue quantum,
 * selects the sched callback, and hands off to pthread to build the user
 * thread state. Never returns.
 */
__attribute__((noreturn, noinline))
static void
workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
{
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		/*
		 * For preemption reasons, we want to reset the voucher as late as
		 * possible, so we do it in two places:
		 *   - Just before parking (i.e. in workq_park_and_unlock())
		 *   - Prior to doing the setup for the next workitem (i.e. here)
		 *
		 * Those two places are sufficient to ensure we always reset it before
		 * it goes back out to user space, but be careful to not break that
		 * guarantee.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread group on this thread
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
	if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
		/* Not the first trip out: userspace can reuse its thread state. */
		upcall_flags |= WQ_FLAG_THREAD_REUSE;
	}

	if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
		/*
		 * For threads that have an outside-of-QoS thread priority, indicate
		 * to userspace that setting QoS should only affect the TSD and not
		 * change QOS in the kernel.
		 */
		upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
	} else {
		/*
		 * Put the QoS class value into the lower bits of the reuse_thread
		 * register, this is where the thread priority used to be stored
		 * anyway.
		 */
		upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
		    WQ_FLAG_THREAD_PRIO_QOS;
	}

	if (uth->uu_workq_thport == MACH_PORT_NULL) {
		/* convert_thread_to_port_pinned() consumes a reference */
		thread_reference(th);
		/* Convert to immovable/pinned thread port, but port is not pinned yet */
		ipc_port_t port = convert_thread_to_port_pinned(th);
		/* Atomically, pin and copy out the port */
		uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(proc_task(p)));
	}

	/* Thread has been set up to run, arm its next workqueue quantum or disarm
	 * if it is no longer supporting that */
	if (thread_supports_cooperative_workqueue(th)) {
		thread_arm_workqueue_quantum(th);
	} else {
		thread_disarm_workqueue_quantum(th);
	}

	/*
	 * Call out to pthread, this sets up the thread, pulls in kevent structs
	 * onto the stack, sets up the thread state and then returns to userspace.
	 */
	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
	    proc_get_wqptr_fast(p), 0, 0, 0);

	/*
	 * Cooperative and permanently-bound threads do not participate in the
	 * blocked/unblocked sched callback accounting.
	 */
	if (workq_thread_is_cooperative(uth) || workq_thread_is_permanently_bound(uth)) {
		thread_sched_call(th, NULL);
	} else {
		thread_sched_call(th, workq_sched_callback);
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, setup_flags, upcall_flags);

	__builtin_unreachable();
}
4981*a1e26a70SApple OSS Distributions
4982*a1e26a70SApple OSS Distributions /**
4983*a1e26a70SApple OSS Distributions * A wrapper around workq_setup_and_run for permanently bound thread.
4984*a1e26a70SApple OSS Distributions */
4985*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
4986*a1e26a70SApple OSS Distributions static void
workq_bound_thread_setup_and_run(struct uthread * uth,int setup_flags)4987*a1e26a70SApple OSS Distributions workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags)
4988*a1e26a70SApple OSS Distributions {
4989*a1e26a70SApple OSS Distributions struct workq_threadreq_s * kqr = uth->uu_kqr_bound;
4990*a1e26a70SApple OSS Distributions
4991*a1e26a70SApple OSS Distributions uint32_t upcall_flags = (WQ_FLAG_THREAD_NEWSPI |
4992*a1e26a70SApple OSS Distributions WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT);
4993*a1e26a70SApple OSS Distributions if (workq_tr_is_overcommit(kqr->tr_flags)) {
4994*a1e26a70SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4995*a1e26a70SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4996*a1e26a70SApple OSS Distributions }
4997*a1e26a70SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
4998*a1e26a70SApple OSS Distributions workq_setup_and_run(current_proc(), uth, setup_flags);
4999*a1e26a70SApple OSS Distributions __builtin_unreachable();
5000*a1e26a70SApple OSS Distributions }
5001*a1e26a70SApple OSS Distributions
/**
 * A parked bound thread wakes up for the first time.
 *
 * Continuation for the initial wakeup of a permanently bound workqueue
 * thread: completes one-time initialization (work interval join, CPU percent
 * limits) before running, or terminates if woken for death. Never returns.
 */
__attribute__((noreturn, noinline))
static void
workq_bound_thread_initialize_and_unpark_continue(void *parameter __unused,
    wait_result_t wr)
{
	/*
	 * Locking model for accessing uu_workq_flags :
	 *
	 * The concurrent access to uu_workq_flags is synchronized with workq lock
	 * until a thread gets permanently bound to a kqwl. Post that, kqlock
	 * is used for subsequent synchronizations. This gives us a significant
	 * benefit by avoiding having to take a process wide workq lock on every
	 * wakeup of the bound thread.
	 * This flip in locking model is tracked with UT_WORKQ_PERMANENT_BIND flag.
	 *
	 * There is one more optimization we can perform for when the thread is
	 * awakened for running (i.e THREAD_AWAKENED) until it parks.
	 * During this window, we know KQ_SLEEP bit is reset so there should not
	 * be any concurrent attempts to modify uu_workq_flags by
	 * kqworkloop_bound_thread_wakeup because the thread is already "awake".
	 * So we can safely access uu_workq_flags within this window without having
	 * to take kqlock. This KQ_SLEEP is later set by the bound thread under
	 * kqlock on its way to parking.
	 */
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (__probable(wr == THREAD_AWAKENED)) {
		/* At most one flag. */
		assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
		    != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));

		assert(workq_thread_is_permanently_bound(uth));

		if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
			/* First real run: UT_WORKQ_NEW must still be set; clear it. */
			assert(uth->uu_workq_flags & UT_WORKQ_NEW);
			uth->uu_workq_flags &= ~UT_WORKQ_NEW;

			struct workq_threadreq_s * kqr = uth->uu_kqr_bound;
			if (kqr->tr_work_interval) {
				kern_return_t kr;
				kr = kern_work_interval_explicit_join(get_machthread(uth),
				    kqr->tr_work_interval);
				/*
				 * The work interval functions require being called on the
				 * current thread. If we fail here, we record the fact and
				 * continue.
				 * In the future, we can preflight checking that this join will
				 * always be successful when the paired kqwl is configured; but,
				 * for now, this should be a rare case (e.g. if you have passed
				 * invalid arguments to the join).
				 */
				if (kr == KERN_SUCCESS) {
					uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_JOINED;
					/* Thread and kqwl both have +1 ref on the work interval. */
				} else {
					uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_FAILED;
				}
			}
			workq_thread_reset_cpupercent(kqr, uth);
			workq_bound_thread_setup_and_run(uth, WQ_SETUP_FIRST_USE);
			__builtin_unreachable();
		} else {
			/*
			 * The permanently bound kqworkloop is getting destroyed so we
			 * are woken up to cleanly unbind ourselves from it and terminate.
			 * See KQ_WORKLOOP_DESTROY -> workq_kern_bound_thread_wakeup.
			 *
			 * The actual full unbind happens from
			 * uthread_cleanup -> kqueue_threadreq_unbind.
			 */
			assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		}
	} else {
		/*
		 * The process is getting terminated so we are woken up to die.
		 * E.g. SIGKILL'd.
		 */
		assert(wr == THREAD_INTERRUPTED);
		/*
		 * It is possible we started running as the process is aborted
		 * due to termination; but, workq_kern_threadreq_permanent_bind
		 * has not had a chance to bind us to the kqwl yet.
		 *
		 * We synchronize with it using workq lock.
		 */
		proc_t p = current_proc();
		struct workqueue *wq = proc_get_wqptr_fast(p);
		workq_lock_spin(wq);
		assert(workq_thread_is_permanently_bound(uth));
		workq_unlock(wq);

		/*
		 * We do the bind commit ourselves if workq_kern_threadreq_permanent_bind
		 * has not done it for us yet so our state is aligned with what the
		 * termination path below expects.
		 */
		kqueue_threadreq_bind_commit(p, get_machthread(uth));
	}
	workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
	__builtin_unreachable();
}
5106*a1e26a70SApple OSS Distributions
5107*a1e26a70SApple OSS Distributions /**
5108*a1e26a70SApple OSS Distributions * A parked bound thread wakes up. Not the first time.
5109*a1e26a70SApple OSS Distributions */
5110*a1e26a70SApple OSS Distributions __attribute__((noreturn, noinline))
5111*a1e26a70SApple OSS Distributions static void
workq_bound_thread_unpark_continue(void * parameter __unused,wait_result_t wr)5112*a1e26a70SApple OSS Distributions workq_bound_thread_unpark_continue(void *parameter __unused, wait_result_t wr)
5113*a1e26a70SApple OSS Distributions {
5114*a1e26a70SApple OSS Distributions struct uthread *uth = get_bsdthread_info(current_thread());
5115*a1e26a70SApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
5116*a1e26a70SApple OSS Distributions
5117*a1e26a70SApple OSS Distributions if (__probable(wr == THREAD_AWAKENED)) {
5118*a1e26a70SApple OSS Distributions /* At most one flag. */
5119*a1e26a70SApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
5120*a1e26a70SApple OSS Distributions != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));
5121*a1e26a70SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
5122*a1e26a70SApple OSS Distributions workq_bound_thread_setup_and_run(uth, WQ_SETUP_NONE);
5123*a1e26a70SApple OSS Distributions } else {
5124*a1e26a70SApple OSS Distributions assert(uth->uu_workq_flags & UT_WORKQ_DYING);
5125*a1e26a70SApple OSS Distributions }
5126*a1e26a70SApple OSS Distributions } else {
5127*a1e26a70SApple OSS Distributions assert(wr == THREAD_INTERRUPTED);
5128*a1e26a70SApple OSS Distributions }
5129*a1e26a70SApple OSS Distributions workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
5130*a1e26a70SApple OSS Distributions __builtin_unreachable();
5131*a1e26a70SApple OSS Distributions }
5132*a1e26a70SApple OSS Distributions
5133*a1e26a70SApple OSS Distributions #pragma mark misc
5134*a1e26a70SApple OSS Distributions
5135*a1e26a70SApple OSS Distributions int
fill_procworkqueue(proc_t p,struct proc_workqueueinfo * pwqinfo)5136*a1e26a70SApple OSS Distributions fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
5137*a1e26a70SApple OSS Distributions {
5138*a1e26a70SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
5139*a1e26a70SApple OSS Distributions int error = 0;
5140*a1e26a70SApple OSS Distributions int activecount;
5141*a1e26a70SApple OSS Distributions
5142*a1e26a70SApple OSS Distributions if (wq == NULL) {
5143*a1e26a70SApple OSS Distributions return EINVAL;
5144*a1e26a70SApple OSS Distributions }
5145*a1e26a70SApple OSS Distributions
5146*a1e26a70SApple OSS Distributions /*
5147*a1e26a70SApple OSS Distributions * This is sometimes called from interrupt context by the kperf sampler.
5148*a1e26a70SApple OSS Distributions * In that case, it's not safe to spin trying to take the lock since we
5149*a1e26a70SApple OSS Distributions * might already hold it. So, we just try-lock it and error out if it's
5150*a1e26a70SApple OSS Distributions * already held. Since this is just a debugging aid, and all our callers
5151*a1e26a70SApple OSS Distributions * are able to handle an error, that's fine.
5152*a1e26a70SApple OSS Distributions */
5153*a1e26a70SApple OSS Distributions bool locked = workq_lock_try(wq);
5154*a1e26a70SApple OSS Distributions if (!locked) {
5155*a1e26a70SApple OSS Distributions return EBUSY;
5156*a1e26a70SApple OSS Distributions }
5157*a1e26a70SApple OSS Distributions
5158*a1e26a70SApple OSS Distributions wq_thactive_t act = _wq_thactive(wq);
5159*a1e26a70SApple OSS Distributions activecount = _wq_thactive_aggregate_downto_qos(wq, act,
5160*a1e26a70SApple OSS Distributions WORKQ_THREAD_QOS_MIN, NULL, NULL);
5161*a1e26a70SApple OSS Distributions if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
5162*a1e26a70SApple OSS Distributions activecount++;
5163*a1e26a70SApple OSS Distributions }
5164*a1e26a70SApple OSS Distributions pwqinfo->pwq_nthreads = wq->wq_nthreads;
5165*a1e26a70SApple OSS Distributions pwqinfo->pwq_runthreads = activecount;
5166*a1e26a70SApple OSS Distributions pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
5167*a1e26a70SApple OSS Distributions pwqinfo->pwq_state = 0;
5168*a1e26a70SApple OSS Distributions
5169*a1e26a70SApple OSS Distributions if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
5170*a1e26a70SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
5171*a1e26a70SApple OSS Distributions }
5172*a1e26a70SApple OSS Distributions
5173*a1e26a70SApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
5174*a1e26a70SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
5175*a1e26a70SApple OSS Distributions }
5176*a1e26a70SApple OSS Distributions
5177*a1e26a70SApple OSS Distributions uint64_t total_cooperative_threads;
5178*a1e26a70SApple OSS Distributions total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
5179*a1e26a70SApple OSS Distributions if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
5180*a1e26a70SApple OSS Distributions workq_has_cooperative_thread_requests(wq)) {
5181*a1e26a70SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT;
5182*a1e26a70SApple OSS Distributions }
5183*a1e26a70SApple OSS Distributions
5184*a1e26a70SApple OSS Distributions if (wq->wq_exceeded_active_constrained_thread_limit) {
5185*a1e26a70SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT;
5186*a1e26a70SApple OSS Distributions }
5187*a1e26a70SApple OSS Distributions
5188*a1e26a70SApple OSS Distributions workq_unlock(wq);
5189*a1e26a70SApple OSS Distributions return error;
5190*a1e26a70SApple OSS Distributions }
5191*a1e26a70SApple OSS Distributions
5192*a1e26a70SApple OSS Distributions boolean_t
workqueue_get_pwq_exceeded(void * v,boolean_t * exceeded_total,boolean_t * exceeded_constrained)5193*a1e26a70SApple OSS Distributions workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
5194*a1e26a70SApple OSS Distributions boolean_t *exceeded_constrained)
5195*a1e26a70SApple OSS Distributions {
5196*a1e26a70SApple OSS Distributions proc_t p = v;
5197*a1e26a70SApple OSS Distributions struct proc_workqueueinfo pwqinfo;
5198*a1e26a70SApple OSS Distributions int err;
5199*a1e26a70SApple OSS Distributions
5200*a1e26a70SApple OSS Distributions assert(p != NULL);
5201*a1e26a70SApple OSS Distributions assert(exceeded_total != NULL);
5202*a1e26a70SApple OSS Distributions assert(exceeded_constrained != NULL);
5203*a1e26a70SApple OSS Distributions
5204*a1e26a70SApple OSS Distributions err = fill_procworkqueue(p, &pwqinfo);
5205*a1e26a70SApple OSS Distributions if (err) {
5206*a1e26a70SApple OSS Distributions return FALSE;
5207*a1e26a70SApple OSS Distributions }
5208*a1e26a70SApple OSS Distributions if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
5209*a1e26a70SApple OSS Distributions return FALSE;
5210*a1e26a70SApple OSS Distributions }
5211*a1e26a70SApple OSS Distributions
5212*a1e26a70SApple OSS Distributions *exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
5213*a1e26a70SApple OSS Distributions *exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);
5214*a1e26a70SApple OSS Distributions
5215*a1e26a70SApple OSS Distributions return TRUE;
5216*a1e26a70SApple OSS Distributions }
5217*a1e26a70SApple OSS Distributions
/*
 * workqueue_get_task_ss_flags_from_pwq_state_kdp
 *
 * Translate the workqueue state of process `v` into the kTaskWq* flag
 * bits recorded in a task snapshot's ss_flags.  The _kdp suffix and the
 * workq_lock_is_acquired_kdp() check indicate this runs in kernel
 * debugger / stackshot context, so it must not block or take the
 * workqueue lock.
 *
 * Returns 0 when the process has no workqueue or its lock is currently
 * held (state could be mid-update); otherwise returns
 * kTaskWqFlagsAvailable OR'ed with any applicable limit flags.
 */
uint64_t
workqueue_get_task_ss_flags_from_pwq_state_kdp(void * v)
{
	/*
	 * The kTaskWq* snapshot flags are defined as the WQ_* pwq_state bits
	 * relocated into the 64-bit ss_flags field: the first three at
	 * (WQ_* << 17), the two newer ones at (WQ_* << 34).  Verify the
	 * correspondence at compile time so the two encodings cannot drift.
	 */
	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
	    kTaskWqExceededConstrainedThreadLimit);
	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
	    kTaskWqExceededTotalThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
	static_assert(((uint64_t)WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededCooperativeThreadLimit);
	static_assert(((uint64_t)WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededActiveConstrainedThreadLimit);
	/* All five WQ_* state bits are the contiguous low bits (mask 0x1F). */
	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
	    WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT |
	    WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT |
	    WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT) == 0x1F);

	if (v == NULL) {
		return 0;
	}

	proc_t p = v;
	struct workqueue *wq = proc_get_wqptr(p);

	/*
	 * If the workqueue lock is held at the moment the debugger stopped
	 * the system, the fields it protects may be mid-update; report
	 * nothing rather than a torn snapshot.  (Taking the lock here is
	 * not an option in kdp context.)
	 */
	if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
		return 0;
	}

	uint64_t ss_flags = kTaskWqFlagsAvailable;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		ss_flags |= kTaskWqExceededConstrainedThreadLimit;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		ss_flags |= kTaskWqExceededTotalThreadLimit;
	}

	/*
	 * NOTE(review): the count is taken via the `_internal` per-QoS walk
	 * from WORKQ_THREAD_QOS_MIN — presumably because the non-internal
	 * helper (used by the locked query path) expects the workq lock,
	 * which kdp cannot take.  Confirm this yields the same total as
	 * workq_num_cooperative_threads_scheduled_total().
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_to_qos_internal(wq,
	    WORKQ_THREAD_QOS_MIN);
	/* At the cooperative pool's max with requests still pending => exceeded. */
	if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
	    workq_has_cooperative_thread_requests(wq)) {
		ss_flags |= kTaskWqExceededCooperativeThreadLimit;
	}

	if (wq->wq_exceeded_active_constrained_thread_limit) {
		ss_flags |= kTaskWqExceededActiveConstrainedThreadLimit;
	}

	return ss_flags;
}
5270*a1e26a70SApple OSS Distributions
5271*a1e26a70SApple OSS Distributions void
workq_init(void)5272*a1e26a70SApple OSS Distributions workq_init(void)
5273*a1e26a70SApple OSS Distributions {
5274*a1e26a70SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
5275*a1e26a70SApple OSS Distributions NSEC_PER_USEC, &wq_stalled_window.abstime);
5276*a1e26a70SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
5277*a1e26a70SApple OSS Distributions NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
5278*a1e26a70SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
5279*a1e26a70SApple OSS Distributions NSEC_PER_USEC, &wq_max_timer_interval.abstime);
5280*a1e26a70SApple OSS Distributions
5281*a1e26a70SApple OSS Distributions thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
5282*a1e26a70SApple OSS Distributions workq_deallocate_queue_invoke);
5283*a1e26a70SApple OSS Distributions }
5284