1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/limits.h>
79 #include <machine/atomic.h>
80
81 #include <machine/commpage.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/cpu_number.h>
87 #include <kern/cpu_data.h>
88 #include <kern/smp.h>
89 #include <kern/debug.h>
90 #include <kern/macro_help.h>
91 #include <kern/machine.h>
92 #include <kern/misc_protos.h>
93 #if MONOTONIC
94 #include <kern/monotonic.h>
95 #endif /* MONOTONIC */
96 #include <kern/processor.h>
97 #include <kern/queue.h>
98 #include <kern/recount.h>
99 #include <kern/restartable.h>
100 #include <kern/sched.h>
101 #include <kern/sched_prim.h>
102 #include <kern/sfi.h>
103 #include <kern/syscall_subr.h>
104 #include <kern/task.h>
105 #include <kern/thread.h>
106 #include <kern/thread_group.h>
107 #include <kern/ledger.h>
108 #include <kern/timer_queue.h>
109 #include <kern/waitq.h>
110 #include <kern/policy_internal.h>
111 #include <kern/cpu_quiesce.h>
112
113 #include <vm/pmap.h>
114 #include <vm/vm_kern.h>
115 #include <vm/vm_map.h>
116 #include <vm/vm_pageout.h>
117
118 #include <mach/sdt.h>
119 #include <mach/mach_host.h>
120 #include <mach/host_info.h>
121
122 #include <sys/kdebug.h>
123 #include <kperf/kperf.h>
124 #include <kern/kpc.h>
125 #include <san/kasan.h>
126 #include <kern/pms.h>
127 #include <kern/host.h>
128 #include <stdatomic.h>
129 #include <os/atomic_private.h>
130
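/* Use the macOS-release kdebug variant when it is defined, falling back to KDBG_RELEASE otherwise. */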
131 #ifdef KDBG_MACOS_RELEASE
132 #define KTRC KDBG_MACOS_RELEASE
133 #else
134 #define KTRC KDBG_RELEASE
135 #endif
136
137 struct sched_statistics PERCPU_DATA(sched_stats);
138 bool sched_stats_active;
139
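/*
 * Saturating add for realtime deadlines: clamps to UINT64_MAX rather than
 * wrapping on overflow.
 */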
140 static uint64_t
141 deadline_add(uint64_t d, uint64_t e)
142 {
143 uint64_t sum;
144 return os_add_overflow(d, e, &sum) ? UINT64_MAX : sum;
145 }
146
147 int
148 rt_runq_count(processor_set_t pset)
149 {
150 return os_atomic_load(&SCHED(rt_runq)(pset)->count, relaxed);
151 }
152
153 uint64_t
154 rt_runq_earliest_deadline(processor_set_t pset)
155 {
156 return os_atomic_load_wide(&SCHED(rt_runq)(pset)->earliest_deadline, relaxed);
157 }
158
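/* Highest priority among queued realtime threads, or -1 if the RT run queue is empty. */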
159 static int
160 rt_runq_priority(processor_set_t pset)
161 {
162 pset_assert_locked(pset);
163 rt_queue_t rt_run_queue = SCHED(rt_runq)(pset);
164
165 bitmap_t *map = rt_run_queue->bitmap;
166 int i = bitmap_first(map, NRTQS);
167 assert(i < NRTQS);
168
169 if (i >= 0) {
170 return i + BASEPRI_RTQUEUES;
171 }
172
173 return i;
174 }
175
176 static thread_t rt_runq_first(rt_queue_t rt_runq);
177
178 #if DEBUG
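/*
 * DEBUG-only sanity check: walk every RT priority queue and verify the
 * cached aggregates (count, earliest deadline, constraint, ed_index)
 * against the per-queue state, and that 'thread' (if non-NULL) is queued.
 */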
179 static void
180 check_rt_runq_consistency(rt_queue_t rt_run_queue, thread_t thread)
181 {
182 bitmap_t *map = rt_run_queue->bitmap;
183
184 uint64_t earliest_deadline = RT_DEADLINE_NONE;
185 uint32_t constraint = RT_CONSTRAINT_NONE;
186 int ed_index = NOPRI;
187 int count = 0;
188 bool found_thread = false;
189
190 for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
191 int i = pri - BASEPRI_RTQUEUES;
192 rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
193 queue_t queue = &rt_runq->pri_queue;
194 queue_entry_t iter;
195 int n = 0;
196 uint64_t previous_deadline = 0;
197 qe_foreach(iter, queue) {
198 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
199 assert_thread_magic(iter_thread);
200 if (iter_thread == thread) {
201 found_thread = true;
202 }
203 assert(iter_thread->sched_pri == (i + BASEPRI_RTQUEUES));
204 assert(iter_thread->realtime.deadline < RT_DEADLINE_NONE);
205 assert(iter_thread->realtime.constraint < RT_CONSTRAINT_NONE);
206 assert(previous_deadline <= iter_thread->realtime.deadline);
207 n++;
208 if (iter == queue_first(queue)) {
209 assert(rt_runq->pri_earliest_deadline == iter_thread->realtime.deadline);
210 assert(rt_runq->pri_constraint == iter_thread->realtime.constraint);
211 }
212 previous_deadline = iter_thread->realtime.deadline;
213 }
214 assert(n == rt_runq->pri_count);
215 if (n == 0) {
216 assert(bitmap_test(map, i) == false);
217 assert(rt_runq->pri_earliest_deadline == RT_DEADLINE_NONE);
218 assert(rt_runq->pri_constraint == RT_CONSTRAINT_NONE);
219 } else {
220 assert(bitmap_test(map, i) == true);
221 }
222 if (rt_runq->pri_earliest_deadline < earliest_deadline) {
223 earliest_deadline = rt_runq->pri_earliest_deadline;
224 constraint = rt_runq->pri_constraint;
225 ed_index = i;
226 }
227 count += n;
228 }
229 assert(os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed) == earliest_deadline);
230 assert(os_atomic_load(&rt_run_queue->count, relaxed) == count);
231 assert(os_atomic_load(&rt_run_queue->constraint, relaxed) == constraint);
232 assert(os_atomic_load(&rt_run_queue->ed_index, relaxed) == ed_index);
233 if (thread) {
234 assert(found_thread);
235 }
236 }
237 #define CHECK_RT_RUNQ_CONSISTENCY(q, th) check_rt_runq_consistency(q, th)
238 #else
239 #define CHECK_RT_RUNQ_CONSISTENCY(q, th) do {} while (0)
240 #endif
241
242 uint32_t rt_constraint_threshold;
243
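/* True when the earliest-deadline queued RT thread's constraint is at or below rt_constraint_threshold. */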
244 static bool
245 rt_runq_is_low_latency(processor_set_t pset)
246 {
247 return os_atomic_load(&SCHED(rt_runq)(pset)->constraint, relaxed) <= rt_constraint_threshold;
248 }
249
250 TUNABLE(bool, cpulimit_affects_quantum, "cpulimit_affects_quantum", true);
251
252 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
253 TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);
254
255 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
256 TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);
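/*
 * The preemption rates translate directly into quantum lengths in
 * sched_timeshare_init(): 1,000,000 us / rate. The defaults give a
 * 10 ms standard quantum (100/s) and a 2.5 ms background quantum (400/s).
 */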
257
258 #define MAX_UNSAFE_RT_QUANTA 100
259 #define SAFE_RT_MULTIPLIER 2
260
261 #define MAX_UNSAFE_FIXED_QUANTA 100
262 #define SAFE_FIXED_MULTIPLIER 2
263
264 TUNABLE_DEV_WRITEABLE(int, max_unsafe_rt_quanta, "max_unsafe_rt_quanta", MAX_UNSAFE_RT_QUANTA);
265 TUNABLE_DEV_WRITEABLE(int, max_unsafe_fixed_quanta, "max_unsafe_fixed_quanta", MAX_UNSAFE_FIXED_QUANTA);
266
267 TUNABLE_DEV_WRITEABLE(int, safe_rt_multiplier, "safe_rt_multiplier", SAFE_RT_MULTIPLIER);
268 TUNABLE_DEV_WRITEABLE(int, safe_fixed_multiplier, "safe_fixed_multiplier", SAFE_FIXED_MULTIPLIER);
269
270 #define MAX_POLL_QUANTA 2
271 TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);
272
273 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
274 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
275
276 uint64_t max_poll_computation;
277
278 uint64_t max_unsafe_rt_computation;
279 uint64_t max_unsafe_fixed_computation;
280 uint64_t sched_safe_rt_duration;
281 uint64_t sched_safe_fixed_duration;
282
283 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
284
285 uint32_t std_quantum;
286 uint32_t min_std_quantum;
287 uint32_t bg_quantum;
288
289 uint32_t std_quantum_us;
290 uint32_t bg_quantum_us;
291
292 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
293
294 uint32_t thread_depress_time;
295 uint32_t default_timeshare_computation;
296 uint32_t default_timeshare_constraint;
297
298 uint32_t max_rt_quantum;
299 uint32_t min_rt_quantum;
300
301 uint32_t rt_deadline_epsilon;
302
303 uint32_t rt_constraint_threshold;
304
305 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
306
307 unsigned sched_tick;
308 uint32_t sched_tick_interval;
309
310 /* Timeshare load calculation interval (15ms) */
311 uint32_t sched_load_compute_interval_us = 15000;
312 uint64_t sched_load_compute_interval_abs;
313 static _Atomic uint64_t sched_load_compute_deadline;
314
315 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
316 uint32_t sched_fixed_shift;
317
318 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
319
320 /* Allow foreground to decay past default to resolve inversions */
321 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
322 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
323
324 /* Defaults for timer deadline profiling */
325 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
326 * 2ms */
327 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
328 * <= 5ms */
329
330 uint64_t timer_deadline_tracking_bin_1;
331 uint64_t timer_deadline_tracking_bin_2;
332
333 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
334
335 thread_t sched_maintenance_thread;
336
337 /* interrupts disabled lock to guard recommended cores state */
338 decl_simple_lock_data(, sched_available_cores_lock);
339 uint64_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
340 uint64_t perfcontrol_system_requested_recommended_cores = ALL_CORES_RECOMMENDED;
341 uint64_t perfcontrol_user_requested_recommended_cores = ALL_CORES_RECOMMENDED;
342 static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
343 static uint64_t sched_online_processors = 0;
344 static void sched_update_recommended_cores(uint64_t recommended_cores, processor_reason_t reason, uint32_t flags);
345 static void sched_update_powered_cores(uint64_t requested_powered_cores, processor_reason_t reason, uint32_t flags);
346
347 #if __arm64__
348 static void sched_recommended_cores_maintenance(void);
349 uint64_t perfcontrol_failsafe_starvation_threshold;
350 extern char *proc_name_address(struct proc *p);
351 #endif /* __arm64__ */
352
353 uint64_t sched_one_second_interval;
354 boolean_t allow_direct_handoff = TRUE;
355
356 /* Forwards */
357
358 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
359
360 static void load_shift_init(void);
361 static void preempt_pri_init(void);
362
363 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
364
365 thread_t processor_idle(
366 thread_t thread,
367 processor_t processor);
368
369 static ast_t
370 csw_check_locked(
371 thread_t thread,
372 processor_t processor,
373 processor_set_t pset,
374 ast_t check_reason);
375
376 static void processor_setrun(
377 processor_t processor,
378 thread_t thread,
379 integer_t options);
380
381 static void
382 sched_realtime_timebase_init(void);
383
384 static void
385 sched_timer_deadline_tracking_init(void);
386
387 #if DEBUG
388 extern int debug_task;
389 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
390 #else
391 #define TLOG(a, fmt, args...) do {} while (0)
392 #endif
393
394 static processor_t
395 thread_bind_internal(
396 thread_t thread,
397 processor_t processor);
398
399 static void
400 sched_vm_group_maintenance(void);
401
402 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
403 int8_t sched_load_shifts[NRQS];
404 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
405 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
406
407 /*
408 * Statically allocate a buffer to hold the longest possible
409 * scheduler description string, as currently implemented.
410 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
411 * to export to userspace via sysctl(3). If either version
412 * changes, update the other.
413 *
414 * Note that in addition to being an upper bound on the strings
415 * in the kernel, it's also an exact parameter to PE_get_default(),
416 * which interrogates the device tree on some platforms. That
417 * API requires the caller know the exact size of the device tree
418 * property, so we need both a legacy size (32) and the current size
419 * (48) to deal with old and new device trees. The device tree property
420 * is similarly padded to a fixed size so that the same kernel image
421 * can run on multiple devices with different schedulers configured
422 * in the device tree.
423 */
424 char sched_string[SCHED_STRING_MAX_LENGTH];
425
426 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
427
428 /* Global flag which indicates whether Background Stepper Context is enabled */
429 static int cpu_throttle_enabled = 1;
430
431 #if DEVELOPMENT || DEBUG
432 int enable_task_set_cluster_type = 0;
433 bool system_ecore_only = false;
434 #endif /* DEVELOPMENT || DEBUG */
435
436 void
437 sched_init(void)
438 {
439 boolean_t direct_handoff = FALSE;
440 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
441
442 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
443 /* No boot-args, check in device tree */
444 if (!PE_get_default("kern.sched_pri_decay_limit",
445 &sched_pri_decay_band_limit,
446 sizeof(sched_pri_decay_band_limit))) {
447 /* Allow decay all the way to normal limits */
448 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
449 }
450 }
451
452 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
453
454 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
455 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
456 }
457 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
458
459 cpu_quiescent_counter_init();
460
461 SCHED(init)();
462 SCHED(rt_init)(&pset0);
463 sched_timer_deadline_tracking_init();
464
465 SCHED(pset_init)(&pset0);
466 SCHED(processor_init)(master_processor);
467
468 if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
469 allow_direct_handoff = direct_handoff;
470 }
471
472 #if DEVELOPMENT || DEBUG
473 if (PE_parse_boot_argn("enable_skstsct", &enable_task_set_cluster_type, sizeof(enable_task_set_cluster_type))) {
474 system_ecore_only = (enable_task_set_cluster_type == 2);
475 }
476 #endif /* DEVELOPMENT || DEBUG */
477
478 simple_lock_init(&sched_available_cores_lock, 0);
479 }
480
481 void
482 sched_timebase_init(void)
483 {
484 uint64_t abstime;
485
486 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
487 sched_one_second_interval = abstime;
488
489 SCHED(timebase_init)();
490 sched_realtime_timebase_init();
491 }
492
493 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
494
495 void
496 sched_timeshare_init(void)
497 {
498 /*
499 * Calculate the timeslicing quantum
500 * in us.
501 */
502 if (default_preemption_rate < 1) {
503 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
504 }
505 std_quantum_us = (1000 * 1000) / default_preemption_rate;
506
507 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
508
509 if (default_bg_preemption_rate < 1) {
510 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
511 }
512 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
513
514 printf("standard background quantum is %d us\n", bg_quantum_us);
515
516 load_shift_init();
517 preempt_pri_init();
518 sched_tick = 0;
519 }
520
521 void
522 sched_set_max_unsafe_rt_quanta(int max)
523 {
524 const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
525
526 max_unsafe_rt_computation = ((uint64_t)max) * quantum_size;
527
528 const int mult = safe_rt_multiplier <= 0 ? 2 : safe_rt_multiplier;
529 sched_safe_rt_duration = mult * ((uint64_t)max) * quantum_size;
530
531
532 #if DEVELOPMENT || DEBUG
533 max_unsafe_rt_quanta = max;
534 #else
535 /*
536 * On RELEASE kernels, this is only called on boot where
537 * max is already equal to max_unsafe_rt_quanta.
538 */
539 assert3s(max, ==, max_unsafe_rt_quanta);
540 #endif
541 }
542
543 void
544 sched_set_max_unsafe_fixed_quanta(int max)
545 {
546 const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
547
548 max_unsafe_fixed_computation = ((uint64_t)max) * quantum_size;
549
550 const int mult = safe_fixed_multiplier <= 0 ? 2 : safe_fixed_multiplier;
551 sched_safe_fixed_duration = mult * ((uint64_t)max) * quantum_size;
552
553 #if DEVELOPMENT || DEBUG
554 max_unsafe_fixed_quanta = max;
555 #else
556 /*
557 * On RELEASE kernels, this is only called on boot where
558 * max is already equal to max_unsafe_fixed_quanta.
559 */
560 assert3s(max, ==, max_unsafe_fixed_quanta);
561 #endif
562 }
563
564 void
565 sched_timeshare_timebase_init(void)
566 {
567 uint64_t abstime;
568 uint32_t shift;
569
570 /* standard timeslicing quantum */
571 clock_interval_to_absolutetime_interval(
572 std_quantum_us, NSEC_PER_USEC, &abstime);
573 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
574 std_quantum = (uint32_t)abstime;
575
576 /* smallest remaining quantum (250 us) */
577 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
578 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
579 min_std_quantum = (uint32_t)abstime;
580
581 /* quantum for background tasks */
582 clock_interval_to_absolutetime_interval(
583 bg_quantum_us, NSEC_PER_USEC, &abstime);
584 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
585 bg_quantum = (uint32_t)abstime;
586
587 /* scheduler tick interval */
588 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
589 NSEC_PER_USEC, &abstime);
590 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
591 sched_tick_interval = (uint32_t)abstime;
592
593 /* timeshare load calculation interval & deadline initialization */
594 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
595 os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);
596
597 /*
598 * Compute conversion factor from usage to
599 * timesharing priorities with 5/8 ** n aging.
600 */
601 abstime = (abstime * 5) / 3;
602 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
603 abstime >>= 1;
604 }
605 sched_fixed_shift = shift;
606
607 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
608 sched_pri_shifts[i] = INT8_MAX;
609 }
610
611 sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
612 sched_set_max_unsafe_fixed_quanta(max_unsafe_fixed_quanta);
613
614 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
615 thread_depress_time = 1 * std_quantum;
616 default_timeshare_computation = std_quantum / 2;
617 default_timeshare_constraint = std_quantum;
618
619 #if __arm64__
620 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
621 #endif /* __arm64__ */
622 }
623
624 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
625
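/*
 * Initialize a pset's realtime run queue: one deadline-ordered queue per
 * RT priority level, plus cached aggregates (count, earliest deadline,
 * constraint, earliest-deadline index) consulted on the fast path.
 */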
626 void
627 pset_rt_init(processor_set_t pset)
628 {
629 for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
630 int i = pri - BASEPRI_RTQUEUES;
631 rt_queue_pri_t *rqi = &pset->rt_runq.rt_queue_pri[i];
632 queue_init(&rqi->pri_queue);
633 rqi->pri_count = 0;
634 rqi->pri_earliest_deadline = RT_DEADLINE_NONE;
635 rqi->pri_constraint = RT_CONSTRAINT_NONE;
636 }
637 os_atomic_init(&pset->rt_runq.count, 0);
638 os_atomic_init(&pset->rt_runq.earliest_deadline, RT_DEADLINE_NONE);
639 os_atomic_init(&pset->rt_runq.constraint, RT_CONSTRAINT_NONE);
640 os_atomic_init(&pset->rt_runq.ed_index, NOPRI);
641 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
642 }
643
644 /* epsilon for comparing RT deadlines */
645 int rt_deadline_epsilon_us = 100;
646
647 int
648 sched_get_rt_deadline_epsilon(void)
649 {
650 return rt_deadline_epsilon_us;
651 }
652
653 void
654 sched_set_rt_deadline_epsilon(int new_epsilon_us)
655 {
656 rt_deadline_epsilon_us = new_epsilon_us;
657
658 uint64_t abstime;
659 clock_interval_to_absolutetime_interval(rt_deadline_epsilon_us, NSEC_PER_USEC, &abstime);
660 assert((abstime >> 32) == 0 && ((rt_deadline_epsilon_us == 0) || (uint32_t)abstime != 0));
661 rt_deadline_epsilon = (uint32_t)abstime;
662 }
663
664 static void
665 sched_realtime_timebase_init(void)
666 {
667 uint64_t abstime;
668
669 /* smallest rt computation (50 us) */
670 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
671 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
672 min_rt_quantum = (uint32_t)abstime;
673
674 /* maximum rt computation (50 ms) */
675 clock_interval_to_absolutetime_interval(
676 50, 1000 * NSEC_PER_USEC, &abstime);
677 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
678 max_rt_quantum = (uint32_t)abstime;
679
680 /* constraint threshold for sending backup IPIs (4 ms) */
681 clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
682 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
683 rt_constraint_threshold = (uint32_t)abstime;
684
685 /* epsilon for comparing deadlines */
686 sched_set_rt_deadline_epsilon(rt_deadline_epsilon_us);
687 }
688
689 void
690 sched_check_spill(processor_set_t pset, thread_t thread)
691 {
692 (void)pset;
693 (void)thread;
694
695 return;
696 }
697
698 bool
699 sched_thread_should_yield(processor_t processor, thread_t thread)
700 {
701 (void)thread;
702
703 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
704 }
705
706 /* Default implementations of .steal_thread_enabled */
707 bool
sched_steal_thread_DISABLED(processor_set_t pset)708 sched_steal_thread_DISABLED(processor_set_t pset)
709 {
710 (void)pset;
711 return false;
712 }
713
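/* Stealing is only worthwhile when the node spans more than one pset. */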
714 bool
sched_steal_thread_enabled(processor_set_t pset)715 sched_steal_thread_enabled(processor_set_t pset)
716 {
717 return bit_count(pset->node->pset_map) > 1;
718 }
719
720 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
721
722 /*
723 * Set up values for timeshare
724 * loading factors.
725 */
726 static void
727 load_shift_init(void)
728 {
729 int8_t k, *p = sched_load_shifts;
730 uint32_t i, j;
731
732 uint32_t sched_decay_penalty = 1;
733
734 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
735 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
736 }
737
738 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
739 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
740 }
741
742 if (sched_decay_penalty == 0) {
743 /*
744 * There is no penalty for timeshare threads for using too much
745 * CPU, so set all load shifts to INT8_MIN. Even under high load,
746 * sched_pri_shift will be >INT8_MAX, and there will be no
747 * penalty applied to threads (nor will sched_usage be updated per
748 * thread).
749 */
750 for (i = 0; i < NRQS; i++) {
751 sched_load_shifts[i] = INT8_MIN;
752 }
753
754 return;
755 }
756
757 *p++ = INT8_MIN; *p++ = 0;
758
759 /*
760 * For a given system load "i", the per-thread priority
761 * penalty per quantum of CPU usage is ~2^k priority
762 * levels. "sched_decay_penalty" can cause more
763 * array entries to be filled with smaller "k" values
764 */
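/*
 * For example, with the default sched_decay_penalty of 1, system loads
 * 2-3 get a shift of 1, loads 4-7 a shift of 2, 8-15 a shift of 3, etc.
 */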
765 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
766 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
767 *p++ = k;
768 }
769 }
770 }
771
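/*
 * Mark the priority levels whose runnable threads cause urgent preemption:
 * BASEPRI_FOREGROUND up to MINPRI_KERNEL, and BASEPRI_PREEMPT and above.
 */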
772 static void
773 preempt_pri_init(void)
774 {
775 bitmap_t *p = sched_preempt_pri;
776
777 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
778 bitmap_set(p, i);
779 }
780
781 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
782 bitmap_set(p, i);
783 }
784 }
785
786 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
787
788 void
789 check_monotonic_time(uint64_t ctime)
790 {
791 processor_t processor = current_processor();
792 uint64_t last_dispatch = processor->last_dispatch;
793
794 if (last_dispatch > ctime) {
795 panic("Non-monotonic time: last_dispatch at 0x%llx, ctime 0x%llx",
796 last_dispatch, ctime);
797 }
798 }
799
800
801 /*
802 * Thread wait timer expiration.
803 * Runs in timer interrupt context with interrupts disabled.
804 */
805 void
806 thread_timer_expire(void *p0, __unused void *p1)
807 {
808 thread_t thread = (thread_t)p0;
809
810 assert_thread_magic(thread);
811
812 assert(ml_get_interrupts_enabled() == FALSE);
813
814 thread_lock(thread);
815
816 if (thread->wait_timer_armed) {
817 thread->wait_timer_armed = false;
818 clear_wait_internal(thread, THREAD_TIMED_OUT);
819 /* clear_wait_internal may have dropped and retaken the thread lock */
820 }
821
822 thread->wait_timer_active--;
823
824 thread_unlock(thread);
825 }
826
827 /*
828 * thread_unblock:
829 *
830 * Unblock thread on wake up.
831 *
832 * Returns TRUE if the thread should now be placed on the runqueue.
833 *
834 * Thread must be locked.
835 *
836 * Called at splsched().
837 */
838 boolean_t
839 thread_unblock(
840 thread_t thread,
841 wait_result_t wresult)
842 {
843 boolean_t ready_for_runq = FALSE;
844 thread_t cthread = current_thread();
845 uint32_t new_run_count;
846 int old_thread_state;
847
848 /*
849 * Set wait_result.
850 */
851 thread->wait_result = wresult;
852
853 /*
854 * Cancel pending wait timer.
855 */
856 if (thread->wait_timer_armed) {
857 if (timer_call_cancel(thread->wait_timer)) {
858 thread->wait_timer_active--;
859 }
860 thread->wait_timer_armed = false;
861 }
862
863 boolean_t aticontext, pidle;
864 ml_get_power_state(&aticontext, &pidle);
865
866 /*
867 * Update scheduling state: not waiting,
868 * set running.
869 */
870 old_thread_state = thread->state;
871 thread->state = (old_thread_state | TH_RUN) &
872 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT | TH_WAKING);
873
874 if ((old_thread_state & TH_RUN) == 0) {
875 uint64_t ctime = mach_approximate_time();
876
877 check_monotonic_time(ctime);
878
879 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
880 timer_start(&thread->runnable_timer, ctime);
881
882 ready_for_runq = TRUE;
883
884 if (old_thread_state & TH_WAIT_REPORT) {
885 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
886 }
887
888 /* Update the runnable thread count */
889 new_run_count = SCHED(run_count_incr)(thread);
890
891 #if CONFIG_SCHED_AUTO_JOIN
892 if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
893 work_interval_auto_join_propagate(cthread, thread);
894 }
895 #endif /*CONFIG_SCHED_AUTO_JOIN */
896 } else {
897 /*
898 * Either the thread is idling in place on another processor,
899 * or it hasn't finished context switching yet.
900 */
901 assert((thread->state & TH_IDLE) == 0);
902 /*
903 * The run count is only dropped after the context switch completes
904 * and the thread is still waiting, so we should not run_incr here
905 */
906 new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
907 }
908
909 /*
910 * Calculate deadline for real-time threads.
911 */
912 if (thread->sched_mode == TH_MODE_REALTIME) {
913 uint64_t ctime = mach_absolute_time();
914 thread->realtime.deadline = thread->realtime.constraint + ctime;
915 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
916 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
917 }
918
919 /*
920 * Clear old quantum, fail-safe computation, etc.
921 */
922 thread->quantum_remaining = 0;
923 thread->computation_metered = 0;
924 thread->reason = AST_NONE;
925 thread->block_hint = kThreadWaitNone;
926
927 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
928 * We also account for "double hop" thread signaling via
929 * the thread callout infrastructure.
930 * DRK: consider removing the callout wakeup counters in the future;
931 * they're present for verification at the moment.
932 */
933
934 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
935 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, current_proc());
936
937 uint64_t ttd = current_processor()->timer_call_ttd;
938
939 if (ttd) {
940 if (ttd <= timer_deadline_tracking_bin_1) {
941 thread->thread_timer_wakeups_bin_1++;
942 } else if (ttd <= timer_deadline_tracking_bin_2) {
943 thread->thread_timer_wakeups_bin_2++;
944 }
945 }
946
947 ledger_credit_thread(thread, thread->t_ledger,
948 task_ledgers.interrupt_wakeups, 1);
949 if (pidle) {
950 ledger_credit_thread(thread, thread->t_ledger,
951 task_ledgers.platform_idle_wakeups, 1);
952 }
953 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
954 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
955 if (cthread->callout_woken_from_icontext) {
956 ledger_credit_thread(thread, thread->t_ledger,
957 task_ledgers.interrupt_wakeups, 1);
958 thread->thread_callout_interrupt_wakeups++;
959
960 if (cthread->callout_woken_from_platform_idle) {
961 ledger_credit_thread(thread, thread->t_ledger,
962 task_ledgers.platform_idle_wakeups, 1);
963 thread->thread_callout_platform_idle_wakeups++;
964 }
965
966 cthread->callout_woke_thread = TRUE;
967 }
968 }
969
970 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
971 thread->callout_woken_from_icontext = !!aticontext;
972 thread->callout_woken_from_platform_idle = !!pidle;
973 thread->callout_woke_thread = FALSE;
974 }
975
976 #if KPERF
977 if (ready_for_runq) {
978 kperf_make_runnable(thread, aticontext);
979 }
980 #endif /* KPERF */
981
982 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
983 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
984 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
985 sched_run_buckets[TH_BUCKET_RUN], 0);
986
987 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, current_proc());
988
989 return ready_for_runq;
990 }
991
992 /*
993 * Routine: thread_allowed_for_handoff
994 * Purpose:
995 * Check if the thread is allowed for handoff operation
996 * Conditions:
997 * thread lock held, IPC locks may be held.
998 * TODO: In future, do not allow handoff if threads have different cluster
999 * recommendations.
1000 */
1001 boolean_t
1002 thread_allowed_for_handoff(
1003 thread_t thread)
1004 {
1005 thread_t self = current_thread();
1006
1007 if (allow_direct_handoff &&
1008 thread->sched_mode == TH_MODE_REALTIME &&
1009 self->sched_mode == TH_MODE_REALTIME) {
1010 return TRUE;
1011 }
1012
1013 return FALSE;
1014 }
1015
1016 /*
1017 * Routine: thread_go
1018 * Purpose:
1019 * Unblock and dispatch thread.
1020 * Conditions:
1021 * thread lock held, IPC locks may be held.
1022 * thread must have been waiting
1023 */
1024 void
1025 thread_go(
1026 thread_t thread,
1027 wait_result_t wresult,
1028 bool try_handoff)
1029 {
1030 thread_t self = current_thread();
1031
1032 assert_thread_magic(thread);
1033
1034 assert(thread->at_safe_point == FALSE);
1035 assert(thread->wait_event == NO_EVENT64);
1036 assert(waitq_is_null(thread->waitq));
1037
1038 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
1039 assert(thread->state & TH_WAIT);
1040
1041 if (thread->started) {
1042 assert(thread->state & TH_WAKING);
1043 }
1044
1045 thread_lock_assert(thread, LCK_ASSERT_OWNED);
1046
1047 assert(ml_get_interrupts_enabled() == false);
1048
1049 if (thread_unblock(thread, wresult)) {
1050 #if SCHED_TRACE_THREAD_WAKEUPS
1051 backtrace(&thread->thread_wakeup_bt[0],
1052 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL,
1053 NULL);
1054 #endif /* SCHED_TRACE_THREAD_WAKEUPS */
1055 if (try_handoff && thread_allowed_for_handoff(thread)) {
1056 thread_reference(thread);
1057 assert(self->handoff_thread == NULL);
1058 self->handoff_thread = thread;
1059 } else {
1060 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1061 }
1062 }
1063 }
1064
1065 /*
1066 * Routine: thread_mark_wait_locked
1067 * Purpose:
1068 * Mark a thread as waiting. If, given the circumstances,
1069 * it doesn't want to wait (i.e. already aborted), then
1070 * indicate that in the return value.
1071 * Conditions:
1072 * at splsched() and thread is locked.
1073 */
1074 __private_extern__
1075 wait_result_t
1076 thread_mark_wait_locked(
1077 thread_t thread,
1078 wait_interrupt_t interruptible_orig)
1079 {
1080 boolean_t at_safe_point;
1081 wait_interrupt_t interruptible = interruptible_orig;
1082
1083 if (thread->state & TH_IDLE) {
1084 panic("Invalid attempt to wait while running the idle thread");
1085 }
1086
1087 assert(!(thread->state & (TH_WAIT | TH_WAKING | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
1088
1089 /*
1090 * The thread may have certain types of interrupts/aborts masked
1091 * off. Even if the wait location says these types of interrupts
1092 * are OK, we have to honor mask settings (outer-scoped code may
1093 * not be able to handle aborts at the moment).
1094 */
1095 interruptible &= TH_OPT_INTMASK;
1096 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
1097 interruptible = thread->options & TH_OPT_INTMASK;
1098 }
1099
1100 at_safe_point = (interruptible == THREAD_ABORTSAFE);
1101
1102 if (interruptible == THREAD_UNINT ||
1103 !(thread->sched_flags & TH_SFLAG_ABORT) ||
1104 (!at_safe_point &&
1105 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
1106 if (!(thread->state & TH_TERMINATE)) {
1107 DTRACE_SCHED(sleep);
1108 }
1109
1110 int state_bits = TH_WAIT;
1111 if (!interruptible) {
1112 state_bits |= TH_UNINT;
1113 }
1114 if (thread->sched_call) {
1115 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
1116 if (is_kerneltask(get_threadtask(thread))) {
1117 mask = THREAD_WAIT_NOREPORT_KERNEL;
1118 }
1119 if ((interruptible_orig & mask) == 0) {
1120 state_bits |= TH_WAIT_REPORT;
1121 }
1122 }
1123 thread->state |= state_bits;
1124 thread->at_safe_point = at_safe_point;
1125
1126 /* TODO: pass this through assert_wait instead, have
1127 * assert_wait just take a struct as an argument */
1128 assert(!thread->block_hint);
1129 thread->block_hint = thread->pending_block_hint;
1130 thread->pending_block_hint = kThreadWaitNone;
1131
1132 return thread->wait_result = THREAD_WAITING;
1133 } else {
1134 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
1135 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1136 }
1137 }
1138 thread->pending_block_hint = kThreadWaitNone;
1139
1140 return thread->wait_result = THREAD_INTERRUPTED;
1141 }
1142
1143 /*
1144 * Routine: thread_interrupt_level
1145 * Purpose:
1146 * Set the maximum interruptible state for the
1147 * current thread. The effective value of any
1148 * interruptible flag passed into assert_wait
1149 * will never exceed this.
1150 *
1151 * Useful for code that must not be interrupted,
1152 * but which calls code that doesn't know that.
1153 * Returns:
1154 * The old interrupt level for the thread.
1155 */
1156 __private_extern__
1157 wait_interrupt_t
1158 thread_interrupt_level(
1159 wait_interrupt_t new_level)
1160 {
1161 thread_t thread = current_thread();
1162 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1163
1164 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1165
1166 return result;
1167 }
1168
1169 /*
1170 * assert_wait:
1171 *
1172 * Assert that the current thread is about to go to
1173 * sleep until the specified event occurs.
1174 */
1175 wait_result_t
1176 assert_wait(
1177 event_t event,
1178 wait_interrupt_t interruptible)
1179 {
1180 if (__improbable(event == NO_EVENT)) {
1181 panic("%s() called with NO_EVENT", __func__);
1182 }
1183
1184 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1185 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1186 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
1187
1188 struct waitq *waitq;
1189 waitq = global_eventq(event);
1190 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
1191 }
1192
1193 /*
1194 * assert_wait_queue:
1195 *
1196 * Return the global waitq for the specified event
1197 */
1198 struct waitq *
1199 assert_wait_queue(
1200 event_t event)
1201 {
1202 return global_eventq(event);
1203 }
1204
1205 wait_result_t
1206 assert_wait_timeout(
1207 event_t event,
1208 wait_interrupt_t interruptible,
1209 uint32_t interval,
1210 uint32_t scale_factor)
1211 {
1212 thread_t thread = current_thread();
1213 wait_result_t wresult;
1214 uint64_t deadline;
1215 spl_t s;
1216
1217 if (__improbable(event == NO_EVENT)) {
1218 panic("%s() called with NO_EVENT", __func__);
1219 }
1220
1221 struct waitq *waitq;
1222 waitq = global_eventq(event);
1223
1224 s = splsched();
1225 waitq_lock(waitq);
1226
1227 clock_interval_to_deadline(interval, scale_factor, &deadline);
1228
1229 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1230 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1231 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1232
1233 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1234 interruptible,
1235 TIMEOUT_URGENCY_SYS_NORMAL,
1236 deadline, TIMEOUT_NO_LEEWAY,
1237 thread);
1238
1239 waitq_unlock(waitq);
1240 splx(s);
1241 return wresult;
1242 }
1243
1244 wait_result_t
1245 assert_wait_timeout_with_leeway(
1246 event_t event,
1247 wait_interrupt_t interruptible,
1248 wait_timeout_urgency_t urgency,
1249 uint32_t interval,
1250 uint32_t leeway,
1251 uint32_t scale_factor)
1252 {
1253 thread_t thread = current_thread();
1254 wait_result_t wresult;
1255 uint64_t deadline;
1256 uint64_t abstime;
1257 uint64_t slop;
1258 uint64_t now;
1259 spl_t s;
1260
1261 if (__improbable(event == NO_EVENT)) {
1262 panic("%s() called with NO_EVENT", __func__);
1263 }
1264
1265 now = mach_absolute_time();
1266 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1267 deadline = now + abstime;
1268
1269 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1270
1271 struct waitq *waitq;
1272 waitq = global_eventq(event);
1273
1274 s = splsched();
1275 waitq_lock(waitq);
1276
1277 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1278 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1279 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1280
1281 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1282 interruptible,
1283 urgency, deadline, slop,
1284 thread);
1285
1286 waitq_unlock(waitq);
1287 splx(s);
1288 return wresult;
1289 }
1290
1291 wait_result_t
1292 assert_wait_deadline(
1293 event_t event,
1294 wait_interrupt_t interruptible,
1295 uint64_t deadline)
1296 {
1297 thread_t thread = current_thread();
1298 wait_result_t wresult;
1299 spl_t s;
1300
1301 if (__improbable(event == NO_EVENT)) {
1302 panic("%s() called with NO_EVENT", __func__);
1303 }
1304
1305 struct waitq *waitq;
1306 waitq = global_eventq(event);
1307
1308 s = splsched();
1309 waitq_lock(waitq);
1310
1311 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1312 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1313 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1314
1315 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1316 interruptible,
1317 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1318 TIMEOUT_NO_LEEWAY, thread);
1319 waitq_unlock(waitq);
1320 splx(s);
1321 return wresult;
1322 }
1323
1324 wait_result_t
1325 assert_wait_deadline_with_leeway(
1326 event_t event,
1327 wait_interrupt_t interruptible,
1328 wait_timeout_urgency_t urgency,
1329 uint64_t deadline,
1330 uint64_t leeway)
1331 {
1332 thread_t thread = current_thread();
1333 wait_result_t wresult;
1334 spl_t s;
1335
1336 if (__improbable(event == NO_EVENT)) {
1337 panic("%s() called with NO_EVENT", __func__);
1338 }
1339
1340 struct waitq *waitq;
1341 waitq = global_eventq(event);
1342
1343 s = splsched();
1344 waitq_lock(waitq);
1345
1346 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1347 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1348 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1349
1350 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1351 interruptible,
1352 urgency, deadline, leeway,
1353 thread);
1354 waitq_unlock(waitq);
1355 splx(s);
1356 return wresult;
1357 }
1358
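/*
 * sched_cond: a small atomic condition built from two bits.  A waiter
 * clears SCHED_COND_ACTIVE before blocking (sched_cond_wait*); a signaller
 * sets SCHED_COND_WAKEUP and only issues a thread wakeup if the waiter was
 * inactive with no wakeup already pending (sched_cond_signal); the waiter
 * acknowledges by setting ACTIVE and clearing WAKEUP (sched_cond_ack).
 */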
1359 void
1360 sched_cond_init(
1361 sched_cond_atomic_t *cond)
1362 {
1363 os_atomic_init(cond, SCHED_COND_INIT);
1364 }
1365
1366 wait_result_t
1367 sched_cond_wait_parameter(
1368 sched_cond_atomic_t *cond,
1369 wait_interrupt_t interruptible,
1370 thread_continue_t continuation,
1371 void *parameter)
1372 {
1373 assert_wait((event_t) cond, interruptible);
1374 /* clear active bit to indicate future wakeups will have to unblock this thread */
1375 sched_cond_t new_state = (sched_cond_t) os_atomic_andnot(cond, SCHED_COND_ACTIVE, relaxed);
1376 if (__improbable(new_state & SCHED_COND_WAKEUP)) {
1377 /* a wakeup has been issued; undo wait assertion, ack the wakeup, and return */
1378 thread_t thread = current_thread();
1379 clear_wait(thread, THREAD_AWAKENED);
1380 sched_cond_ack(cond);
1381 return THREAD_AWAKENED;
1382 }
1383 return thread_block_parameter(continuation, parameter);
1384 }
1385
1386 wait_result_t
1387 sched_cond_wait(
1388 sched_cond_atomic_t *cond,
1389 wait_interrupt_t interruptible,
1390 thread_continue_t continuation)
1391 {
1392 return sched_cond_wait_parameter(cond, interruptible, continuation, NULL);
1393 }
1394
1395 sched_cond_t
1396 sched_cond_ack(
1397 sched_cond_atomic_t *cond)
1398 {
1399 sched_cond_t new_cond = (sched_cond_t) os_atomic_xor(cond, SCHED_COND_ACTIVE | SCHED_COND_WAKEUP, acquire);
1400 assert(new_cond & SCHED_COND_ACTIVE);
1401 return new_cond;
1402 }
1403
1404 kern_return_t
1405 sched_cond_signal(
1406 sched_cond_atomic_t *cond,
1407 thread_t thread)
1408 {
1409 disable_preemption();
1410 sched_cond_t old_cond = (sched_cond_t) os_atomic_or_orig(cond, SCHED_COND_WAKEUP, release);
1411 if (!(old_cond & (SCHED_COND_WAKEUP | SCHED_COND_ACTIVE))) {
1412 /* this was the first wakeup to be issued AND the thread was inactive */
1413 thread_wakeup_thread((event_t) cond, thread);
1414 }
1415 enable_preemption();
1416 return KERN_SUCCESS;
1417 }
1418
1419 /*
1420 * thread_isoncpu:
1421 *
1422 * Return TRUE if a thread is running on a processor such that an AST
1423 * is needed to pull it out of userspace execution, or if executing in
1424 * the kernel, bring to a context switch boundary that would cause
1425 * thread state to be serialized in the thread PCB.
1426 *
1427 * Thread locked, returns the same way. While locked, fields
1428 * like "state" cannot change. "runq" can change only from set to unset.
1429 */
1430 static inline boolean_t
1431 thread_isoncpu(thread_t thread)
1432 {
1433 /* Not running or runnable */
1434 if (!(thread->state & TH_RUN)) {
1435 return FALSE;
1436 }
1437
1438 /* Waiting on a runqueue, not currently running */
1439 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1440 if (thread->runq != PROCESSOR_NULL) {
1441 return FALSE;
1442 }
1443
1444 /*
1445 * Thread does not have a stack yet
1446 * It could be on the stack alloc queue or preparing to be invoked
1447 */
1448 if (!thread->kernel_stack) {
1449 return FALSE;
1450 }
1451
1452 /*
1453 * Thread must be running on a processor, or
1454 * about to run, or just did run. In all these
1455 * cases, an AST to the processor is needed
1456 * to guarantee that the thread is kicked out
1457 * of userspace and the processor has
1458 * context switched (and saved register state).
1459 */
1460 return TRUE;
1461 }
1462
1463 /*
1464 * thread_stop:
1465 *
1466 * Force a preemption point for a thread and wait
1467 * for it to stop running on a CPU. If a stronger
1468 * guarantee is requested, wait until no longer
1469 * runnable. Arbitrates access among
1470 * multiple stop requests. (released by unstop)
1471 *
1472 * The thread must enter a wait state and stop via a
1473 * separate means.
1474 *
1475 * Returns FALSE if interrupted.
1476 */
1477 boolean_t
1478 thread_stop(
1479 thread_t thread,
1480 boolean_t until_not_runnable)
1481 {
1482 wait_result_t wresult;
1483 spl_t s = splsched();
1484 boolean_t oncpu;
1485
1486 wake_lock(thread);
1487 thread_lock(thread);
1488
1489 while (thread->state & TH_SUSP) {
1490 thread->wake_active = TRUE;
1491 thread_unlock(thread);
1492
1493 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1494 wake_unlock(thread);
1495 splx(s);
1496
1497 if (wresult == THREAD_WAITING) {
1498 wresult = thread_block(THREAD_CONTINUE_NULL);
1499 }
1500
1501 if (wresult != THREAD_AWAKENED) {
1502 return FALSE;
1503 }
1504
1505 s = splsched();
1506 wake_lock(thread);
1507 thread_lock(thread);
1508 }
1509
1510 thread->state |= TH_SUSP;
1511
1512 while ((oncpu = thread_isoncpu(thread)) ||
1513 (until_not_runnable && (thread->state & TH_RUN))) {
1514 processor_t processor;
1515
1516 if (oncpu) {
1517 assert(thread->state & TH_RUN);
1518 processor = thread->chosen_processor;
1519 cause_ast_check(processor);
1520 }
1521
1522 thread->wake_active = TRUE;
1523 thread_unlock(thread);
1524
1525 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1526 wake_unlock(thread);
1527 splx(s);
1528
1529 if (wresult == THREAD_WAITING) {
1530 wresult = thread_block(THREAD_CONTINUE_NULL);
1531 }
1532
1533 if (wresult != THREAD_AWAKENED) {
1534 thread_unstop(thread);
1535 return FALSE;
1536 }
1537
1538 s = splsched();
1539 wake_lock(thread);
1540 thread_lock(thread);
1541 }
1542
1543 thread_unlock(thread);
1544 wake_unlock(thread);
1545 splx(s);
1546
1547 /*
1548 * We return with the thread unlocked. To prevent it from
1549 * transitioning to a runnable state (or from TH_RUN to
1550 * being on the CPU), the caller must ensure the thread
1551 * is stopped via an external means (such as an AST)
1552 */
1553
1554 return TRUE;
1555 }
1556
1557 /*
1558 * thread_unstop:
1559 *
1560 * Release a previous stop request and set
1561 * the thread running if appropriate.
1562 *
1563 * Use only after a successful stop operation.
1564 */
1565 void
1566 thread_unstop(
1567 thread_t thread)
1568 {
1569 spl_t s = splsched();
1570
1571 wake_lock(thread);
1572 thread_lock(thread);
1573
1574 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1575
1576 if (thread->state & TH_SUSP) {
1577 thread->state &= ~TH_SUSP;
1578
1579 if (thread->wake_active) {
1580 thread->wake_active = FALSE;
1581 thread_unlock(thread);
1582
1583 thread_wakeup(&thread->wake_active);
1584 wake_unlock(thread);
1585 splx(s);
1586
1587 return;
1588 }
1589 }
1590
1591 thread_unlock(thread);
1592 wake_unlock(thread);
1593 splx(s);
1594 }
1595
1596 /*
1597 * thread_wait:
1598 *
1599 * Wait for a thread to stop running. (non-interruptible)
1600 *
1601 */
1602 void
1603 thread_wait(
1604 thread_t thread,
1605 boolean_t until_not_runnable)
1606 {
1607 wait_result_t wresult;
1608 boolean_t oncpu;
1609 processor_t processor;
1610 spl_t s = splsched();
1611
1612 wake_lock(thread);
1613 thread_lock(thread);
1614
1615 /*
1616 * Wait until not running on a CPU. If stronger requirement
1617 * desired, wait until not runnable. Assumption: if thread is
1618 * on CPU, then TH_RUN is set, so we're not waiting in any case
1619 * where the original, pure "TH_RUN" check would have let us
1620 * finish.
1621 */
1622 while ((oncpu = thread_isoncpu(thread)) ||
1623 (until_not_runnable && (thread->state & TH_RUN))) {
1624 if (oncpu) {
1625 assert(thread->state & TH_RUN);
1626 processor = thread->chosen_processor;
1627 cause_ast_check(processor);
1628 }
1629
1630 thread->wake_active = TRUE;
1631 thread_unlock(thread);
1632
1633 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1634 wake_unlock(thread);
1635 splx(s);
1636
1637 if (wresult == THREAD_WAITING) {
1638 thread_block(THREAD_CONTINUE_NULL);
1639 }
1640
1641 s = splsched();
1642 wake_lock(thread);
1643 thread_lock(thread);
1644 }
1645
1646 thread_unlock(thread);
1647 wake_unlock(thread);
1648 splx(s);
1649 }
1650
1651 /*
1652 * Routine: clear_wait_internal
1653 *
1654 * Clear the wait condition for the specified thread.
1655 * Start the thread executing if that is appropriate.
1656 * Arguments:
1657 * thread thread to awaken
1658 * result Wakeup result the thread should see
1659 * Conditions:
1660 * At splsched
1661 * the thread is locked.
1662 * Returns:
1663 * KERN_SUCCESS thread was rousted out a wait
1664 * KERN_FAILURE thread was waiting but could not be rousted
1665 * KERN_NOT_WAITING thread was not waiting
1666 */
1667 __private_extern__ kern_return_t
1668 clear_wait_internal(
1669 thread_t thread,
1670 wait_result_t wresult)
1671 {
1672 waitq_t waitq = thread->waitq;
1673
1674 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1675 return KERN_FAILURE;
1676 }
1677
1678 /*
1679 * Check that the thread is waiting and not waking: a waking thread
1680 * has already cleared its waitq and is destined to be run via
1681 * thread_go(), so there is no need to do it again.
1682 */
1683 if ((thread->state & (TH_WAIT | TH_TERMINATE | TH_WAKING)) != TH_WAIT) {
1684 assert(waitq_is_null(thread->waitq));
1685 return KERN_NOT_WAITING;
1686 }
1687
1688 /* may drop and retake the thread lock */
1689 if (!waitq_is_null(waitq) && !waitq_pull_thread_locked(waitq, thread)) {
1690 return KERN_NOT_WAITING;
1691 }
1692
1693 thread_go(thread, wresult, /* handoff */ false);
1694
1695 return KERN_SUCCESS;
1696 }
1697
1698
1699 /*
1700 * clear_wait:
1701 *
1702 * Clear the wait condition for the specified thread. Start the thread
1703 * executing if that is appropriate.
1704 *
1705 * parameters:
1706 * thread thread to awaken
1707 * result Wakeup result the thread should see
1708 */
1709 kern_return_t
1710 clear_wait(
1711 thread_t thread,
1712 wait_result_t result)
1713 {
1714 kern_return_t ret;
1715 spl_t s;
1716
1717 s = splsched();
1718 thread_lock(thread);
1719
1720 ret = clear_wait_internal(thread, result);
1721
1722 if (thread == current_thread()) {
1723 /*
1724 * The thread must be ready to wait again immediately
1725 * after clearing its own wait.
1726 */
1727 assert((thread->state & TH_WAKING) == 0);
1728 }
1729
1730 thread_unlock(thread);
1731 splx(s);
1732 return ret;
1733 }
1734
1735
1736 /*
1737 * thread_wakeup_prim:
1738 *
1739 * Common routine for thread_wakeup, thread_wakeup_with_result,
1740 * and thread_wakeup_one.
1741 *
1742 */
1743 kern_return_t
1744 thread_wakeup_prim(
1745 event_t event,
1746 boolean_t one_thread,
1747 wait_result_t result)
1748 {
1749 if (__improbable(event == NO_EVENT)) {
1750 panic("%s() called with NO_EVENT", __func__);
1751 }
1752
1753 struct waitq *wq = global_eventq(event);
1754
1755 if (one_thread) {
1756 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_WAKEUP_DEFAULT);
1757 } else {
1758 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_WAKEUP_DEFAULT);
1759 }
1760 }
1761
1762 /*
1763 * Wakeup a specified thread if and only if it's waiting for this event
1764 */
1765 kern_return_t
1766 thread_wakeup_thread(
1767 event_t event,
1768 thread_t thread)
1769 {
1770 if (__improbable(event == NO_EVENT)) {
1771 panic("%s() called with NO_EVENT", __func__);
1772 }
1773
1774 if (__improbable(thread == THREAD_NULL)) {
1775 panic("%s() called with THREAD_NULL", __func__);
1776 }
1777
1778 struct waitq *wq = global_eventq(event);
1779
1780 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1781 }
1782
1783 /*
1784 * Wakeup a thread waiting on an event and promote it to a priority.
1785 *
1786 * Requires woken thread to un-promote itself when done.
1787 */
1788 kern_return_t
1789 thread_wakeup_one_with_pri(
1790 event_t event,
1791 int priority)
1792 {
1793 if (__improbable(event == NO_EVENT)) {
1794 panic("%s() called with NO_EVENT", __func__);
1795 }
1796
1797 struct waitq *wq = global_eventq(event);
1798
1799 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1800 }
1801
1802 /*
1803 * Wakeup a thread waiting on an event,
1804 * promote it to a priority,
1805 * and return a reference to the woken thread.
1806 *
1807 * Requires woken thread to un-promote itself when done.
1808 */
1809 thread_t
1810 thread_wakeup_identify(event_t event,
1811 int priority)
1812 {
1813 if (__improbable(event == NO_EVENT)) {
1814 panic("%s() called with NO_EVENT", __func__);
1815 }
1816
1817 struct waitq *wq = global_eventq(event);
1818
1819 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1820 }
1821
1822 /*
1823 * thread_bind:
1824 *
1825 * Force the current thread to execute on the specified processor.
1826 * Takes effect after the next thread_block().
1827 *
1828 * Returns the previous binding. PROCESSOR_NULL means
1829 * not bound.
1830 *
1831 * XXX - DO NOT export this to users - XXX
1832 */
1833 processor_t
1834 thread_bind(
1835 processor_t processor)
1836 {
1837 thread_t self = current_thread();
1838 processor_t prev;
1839 spl_t s;
1840
1841 s = splsched();
1842 thread_lock(self);
1843
1844 prev = thread_bind_internal(self, processor);
1845
1846 thread_unlock(self);
1847 splx(s);
1848
1849 return prev;
1850 }
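
/*
 * Typical pattern (sketch; "target" stands for whichever processor the
 * caller needs): temporarily pin the calling thread, force the migration
 * with thread_block(), do the per-CPU work, then restore the old binding.
 *
 *	processor_t prev = thread_bind(target);
 *	thread_block(THREAD_CONTINUE_NULL);	// migrate to "target"
 *	// ... work that must run on "target" ...
 *	thread_bind(prev);
 *	thread_block(THREAD_CONTINUE_NULL);
 */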
1851
1852 /*
1853 * thread_bind_internal:
1854 *
1855 * If the specified thread is not the current thread, and it is currently
1856 * running on another CPU, a remote AST must be sent to that CPU to cause
1857 * the thread to migrate to its bound processor. Otherwise, the migration
1858 * will occur at the next quantum expiration or blocking point.
1859 *
1860 * When the thread is the current thread, an explicit thread_block() should
1861 * be used to force the current processor to context switch away and
1862 * let the thread migrate to the bound processor.
1863 *
1864 * Thread must be locked, and at splsched.
1865 */
1866
1867 static processor_t
1868 thread_bind_internal(
1869 thread_t thread,
1870 processor_t processor)
1871 {
1872 processor_t prev;
1873
1874 /* <rdar://problem/15102234> */
1875 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1876 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1877 assert(thread->runq == PROCESSOR_NULL);
1878
1879 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1880
1881 prev = thread->bound_processor;
1882 thread->bound_processor = processor;
1883
1884 return prev;
1885 }
1886
1887 /*
1888 * thread_vm_bind_group_add:
1889 *
1890 * The "VM bind group" is a special mechanism to mark a collection
1891 * of threads from the VM subsystem that, in general, should be scheduled
1892 * with only one CPU of parallelism. To accomplish this, we initially
1893 * bind all the threads to the master processor, which has the effect
1894 * that only one of the threads in the group can execute at once, including
1895 * preempting lower-priority threads in the group. Future
1896 * implementations may use more dynamic mechanisms to prevent the collection
1897 * of VM threads from using more CPU time than desired.
1898 *
1899 * The current implementation can result in priority inversions where
1900 * compute-bound priority 95 or realtime threads that happen to have
1901 * landed on the master processor prevent the VM threads from running.
1902 * When this situation is detected, we unbind the threads for one
1903 * scheduler tick to allow the scheduler to run the threads on
1904 * additional CPUs, before restoring the binding (assuming high latency
1905 * is no longer a problem).
1906 */
1907
1908 /*
1909 * The current max is provisioned for:
1910 * vm_compressor_swap_trigger_thread (92)
1911 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1912 * vm_pageout_continue (92)
1913 * memorystatus_thread (95)
1914 */
1915 #define MAX_VM_BIND_GROUP_COUNT (5)
1916 decl_simple_lock_data(static, sched_vm_group_list_lock);
1917 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1918 static int sched_vm_group_thread_count;
1919 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1920
1921 void
1922 thread_vm_bind_group_add(void)
1923 {
1924 thread_t self = current_thread();
1925
1926 thread_reference(self);
1927 self->options |= TH_OPT_SCHED_VM_GROUP;
1928
1929 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1930 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1931 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1932 simple_unlock(&sched_vm_group_list_lock);
1933
1934 thread_bind(master_processor);
1935
1936 /* Switch to bound processor if not already there */
1937 thread_block(THREAD_CONTINUE_NULL);
1938 }
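
/*
 * Sketch (with a hypothetical daemon body): a VM subsystem thread joins
 * the bind group once, during its own initialization, and then continues
 * its service loop bound to the master processor.
 *
 *	static void
 *	vm_example_daemon(void *arg __unused, wait_result_t wr __unused)
 *	{
 *		thread_vm_bind_group_add();
 *		for (;;) {
 *			// wait for and process VM work
 *		}
 *	}
 */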
1939
1940 static void
1941 sched_vm_group_maintenance(void)
1942 {
1943 uint64_t ctime = mach_absolute_time();
1944 uint64_t longtime = ctime - sched_tick_interval;
1945 int i;
1946 spl_t s;
1947 boolean_t high_latency_observed = FALSE;
1948 boolean_t runnable_and_not_on_runq_observed = FALSE;
1949 boolean_t bind_target_changed = FALSE;
1950 processor_t bind_target = PROCESSOR_NULL;
1951
1952 /* Make sure nobody attempts to add new threads while we are enumerating them */
1953 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1954
1955 s = splsched();
1956
1957 for (i = 0; i < sched_vm_group_thread_count; i++) {
1958 thread_t thread = sched_vm_group_thread_list[i];
1959 assert(thread != THREAD_NULL);
1960 thread_lock(thread);
1961 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
1962 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1963 high_latency_observed = TRUE;
1964 } else if (thread->runq == PROCESSOR_NULL) {
1965 /* There are some cases where a thread may be transitioning that also fall into this case */
1966 runnable_and_not_on_runq_observed = TRUE;
1967 }
1968 }
1969 thread_unlock(thread);
1970
1971 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1972 /* All the things we are looking for are true, stop looking */
1973 break;
1974 }
1975 }
1976
1977 splx(s);
1978
1979 if (sched_vm_group_temporarily_unbound) {
1980 /* If we turned off binding, make sure everything is OK before rebinding */
1981 if (!high_latency_observed) {
1982 /* rebind */
1983 bind_target_changed = TRUE;
1984 bind_target = master_processor;
1985 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1986 }
1987 } else {
1988 /*
1989 * Check if we're in a bad state, which is defined by high
1990 * latency with no core currently executing a thread. If a
1991 * single thread is making progress on a CPU, that means the
1992 * binding concept to reduce parallelism is working as
1993 * designed.
1994 */
1995 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1996 /* unbind */
1997 bind_target_changed = TRUE;
1998 bind_target = PROCESSOR_NULL;
1999 sched_vm_group_temporarily_unbound = TRUE;
2000 }
2001 }
2002
2003 if (bind_target_changed) {
2004 s = splsched();
2005 for (i = 0; i < sched_vm_group_thread_count; i++) {
2006 thread_t thread = sched_vm_group_thread_list[i];
2007 boolean_t removed;
2008 assert(thread != THREAD_NULL);
2009
2010 thread_lock(thread);
2011 removed = thread_run_queue_remove(thread);
2012 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
2013 thread_bind_internal(thread, bind_target);
2014 } else {
2015 /*
2016 * Thread was in the middle of being context-switched-to,
2017 * or was in the process of blocking. To avoid switching the bind
2018 * state out mid-flight, defer the change if possible.
2019 */
2020 if (bind_target == PROCESSOR_NULL) {
2021 thread_bind_internal(thread, bind_target);
2022 } else {
2023 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
2024 }
2025 }
2026
2027 if (removed) {
2028 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
2029 }
2030 thread_unlock(thread);
2031 }
2032 splx(s);
2033 }
2034
2035 simple_unlock(&sched_vm_group_list_lock);
2036 }
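
/*
 * Example pass (sketch): a compute-bound priority-95 thread camps on the
 * master processor, so a maintenance pass finds a group thread that has
 * been runnable but queued for more than a scheduler tick
 * (high_latency_observed) while no group thread is actually on core
 * (!runnable_and_not_on_runq_observed); the group is unbound
 * (bind_target == PROCESSOR_NULL). Once the latency clears, a later pass
 * rebinds every group thread to master_processor.
 */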
2037
2038 #if defined(__x86_64__)
2039 #define SCHED_AVOID_CPU0 1
2040 #else
2041 #define SCHED_AVOID_CPU0 0
2042 #endif
2043
2044 int sched_allow_rt_smt = 1;
2045 int sched_avoid_cpu0 = SCHED_AVOID_CPU0;
2046 int sched_allow_rt_steal = 1;
2047 int sched_backup_cpu_timeout_count = 5; /* The maximum number of 10us delays to wait before using a backup cpu */
2048
2049 int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
2050
2051 int
2052 sched_get_rt_n_backup_processors(void)
2053 {
2054 return sched_rt_n_backup_processors;
2055 }
2056
2057 void
2058 sched_set_rt_n_backup_processors(int n)
2059 {
2060 if (n < 0) {
2061 n = 0;
2062 } else if (n > SCHED_MAX_BACKUP_PROCESSORS) {
2063 n = SCHED_MAX_BACKUP_PROCESSORS;
2064 }
2065
2066 sched_rt_n_backup_processors = n;
2067 }
2068
2069 int sched_rt_runq_strict_priority = false;
2070
2071 inline static processor_set_t
2072 change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
2073 {
2074 if (current_pset != new_pset) {
2075 pset_unlock(current_pset);
2076 pset_lock(new_pset);
2077 }
2078
2079 return new_pset;
2080 }
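
/*
 * Usage sketch: walk every pset in a node while holding exactly one pset
 * lock at any time ("starting_pset" is whichever locked pset the caller
 * begins with).
 *
 *	processor_set_t pset = starting_pset;
 *	do {
 *		processor_set_t nset = next_pset(pset);
 *		pset = change_locked_pset(pset, nset);
 *		// examine pset under its lock
 *	} while (pset != starting_pset);
 */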
2081
2082 /*
2083 * Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
2084 * rebalancing opportunity exists when a core is (instantaneously) idle, but
2085 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
2086 * - IPI thrash if this core does not remain idle following the load balancing ASTs
2087 * - Idle "thrash", when IPI issue is followed by idle entry/core power down
2088 *   followed by a wakeup shortly thereafter.
2089 */
2090
2091 #if (DEVELOPMENT || DEBUG)
2092 int sched_smt_balance = 1;
2093 #endif
2094
2095 /* Invoked with pset locked, returns with pset unlocked */
2096 void
2097 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
2098 {
2099 processor_t ast_processor = NULL;
2100
2101 #if (DEVELOPMENT || DEBUG)
2102 if (__improbable(sched_smt_balance == 0)) {
2103 goto smt_balance_exit;
2104 }
2105 #endif
2106
2107 assert(cprocessor == current_processor());
2108 if (cprocessor->is_SMT == FALSE) {
2109 goto smt_balance_exit;
2110 }
2111
2112 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
2113
2114 /* Determine if both this processor and its sibling are idle,
2115 * indicating an SMT rebalancing opportunity.
2116 */
2117 if (sib_processor->state != PROCESSOR_IDLE) {
2118 goto smt_balance_exit;
2119 }
2120
2121 processor_t sprocessor;
2122
2123 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2124 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
2125 ~cpset->primary_map);
2126 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
2127 sprocessor = processor_array[cpuid];
2128 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
2129 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
2130 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2131 if (ipi_type != SCHED_IPI_NONE) {
2132 assert(sprocessor != cprocessor);
2133 ast_processor = sprocessor;
2134 break;
2135 }
2136 }
2137 }
2138
2139 smt_balance_exit:
2140 pset_unlock(cpset);
2141
2142 if (ast_processor) {
2143 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
2144 sched_ipi_perform(ast_processor, ipi_type);
2145 }
2146 }
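
/*
 * Worked example: this core's primary and secondary are both idle, while
 * elsewhere in the pset another primary is RUNNING and its secondary is
 * also RUNNING a non-realtime thread (that physical core is
 * over-committed). The loop above picks that secondary out of
 * running_secondary_map and sends it an SMT_REBAL IPI so its load can
 * migrate over to the idle core.
 */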
2147
2148 static cpumap_t
2149 pset_available_cpumap(processor_set_t pset)
2150 {
2151 return pset->cpu_available_map & pset->recommended_bitmask;
2152 }
2153
2154 int
2155 pset_available_cpu_count(processor_set_t pset)
2156 {
2157 return bit_count(pset_available_cpumap(pset));
2158 }
2159
2160 bool
2161 pset_is_recommended(processor_set_t pset)
2162 {
2163 if (!pset) {
2164 return false;
2165 }
2166 return pset_available_cpu_count(pset) > 0;
2167 }
2168
2169 static cpumap_t
2170 pset_available_but_not_running_cpumap(processor_set_t pset)
2171 {
2172 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
2173 pset->recommended_bitmask;
2174 }
2175
2176 bool
2177 pset_has_stealable_threads(processor_set_t pset)
2178 {
2179 pset_assert_locked(pset);
2180
2181 cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
2182 /*
2183 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
2184 * available primary CPUs
2185 */
2186 avail_map &= pset->primary_map;
2187
2188 return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
2189 }
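
/*
 * Worked example: two recommended primary CPUs are idle or dispatching
 * (avail_map has two bits set), the pset run queue holds two threads and
 * one realtime thread is queued: pset_runq.count > 0 and (2 + 1) > 2, so
 * this pset has stealable threads.
 */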
2190
2191 static cpumap_t
2192 pset_available_but_not_running_rt_threads_cpumap(processor_set_t pset)
2193 {
2194 cpumap_t avail_map = pset_available_cpumap(pset);
2195 if (!sched_allow_rt_smt) {
2196 /*
2197 * Secondary CPUs are not allowed to run RT threads, so
2198 * only primary CPUs should be included
2199 */
2200 avail_map &= pset->primary_map;
2201 }
2202
2203 return avail_map & ~pset->realtime_map;
2204 }
2205
2206 static bool
2207 pset_needs_a_followup_IPI(processor_set_t pset)
2208 {
2209 int nbackup_cpus = 0;
2210
2211 if (rt_runq_is_low_latency(pset)) {
2212 nbackup_cpus = sched_rt_n_backup_processors;
2213 }
2214
2215 int rt_rq_count = rt_runq_count(pset);
2216
2217 return (rt_rq_count > 0) && ((rt_rq_count + nbackup_cpus - bit_count(pset->pending_AST_URGENT_cpu_mask)) > 0);
2218 }
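
/*
 * Worked example: three realtime threads are queued, the RT runq is low
 * latency with sched_rt_n_backup_processors == 2, and four CPUs already
 * have urgent ASTs pending: (3 + 2 - 4) > 0, so one more follow-up IPI
 * is warranted.
 */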
2219
2220 bool
2221 pset_has_stealable_rt_threads(processor_set_t pset)
2222 {
2223 pset_node_t node = pset->node;
2224 if (bit_count(node->pset_map) == 1) {
2225 return false;
2226 }
2227
2228 cpumap_t avail_map = pset_available_but_not_running_rt_threads_cpumap(pset);
2229
2230 return rt_runq_count(pset) > bit_count(avail_map);
2231 }
2232
2233 static void
2234 pset_update_rt_stealable_state(processor_set_t pset)
2235 {
2236 if (pset_has_stealable_rt_threads(pset)) {
2237 pset->stealable_rt_threads_earliest_deadline = rt_runq_earliest_deadline(pset);
2238 } else {
2239 pset->stealable_rt_threads_earliest_deadline = RT_DEADLINE_NONE;
2240 }
2241 }
2242
2243 static void
2244 clear_pending_AST_bits(processor_set_t pset, processor_t processor, __kdebug_only const int trace_point_number)
2245 {
2246 /* Acknowledge any pending IPIs here with pset lock held */
2247 pset_assert_locked(pset);
2248 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2249 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END,
2250 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, trace_point_number);
2251 }
2252 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2253
2254 #if defined(CONFIG_SCHED_DEFERRED_AST)
2255 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
2256 #endif
2257 }
2258
2259 /*
2260 * Called with pset locked, on a processor that is committing to run a new thread.
2261 * Will transition an idle or dispatching processor to running as it picks up
2262 * the first new thread from the idle thread.
2263 */
2264 static void
2265 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
2266 {
2267 pset_assert_locked(pset);
2268
2269 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2270 assert(current_thread() == processor->idle_thread);
2271
2272 /*
2273 * Dispatching processor is now committed to running new_thread,
2274 * so change its state to PROCESSOR_RUNNING.
2275 */
2276 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
2277 } else {
2278 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
2279 }
2280
2281 processor_state_update_from_thread(processor, new_thread, true);
2282
2283 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2284 bit_set(pset->realtime_map, processor->cpu_id);
2285 } else {
2286 bit_clear(pset->realtime_map, processor->cpu_id);
2287 }
2288 pset_update_rt_stealable_state(pset);
2289
2290 pset_node_t node = pset->node;
2291
2292 if (bit_count(node->pset_map) == 1) {
2293 /* Node has only a single pset, so skip node pset map updates */
2294 return;
2295 }
2296
2297 cpumap_t avail_map = pset_available_cpumap(pset);
2298
2299 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2300 if ((avail_map & pset->realtime_map) == avail_map) {
2301 /* No more non-RT CPUs in this pset */
2302 atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2303 }
2304 avail_map &= pset->primary_map;
2305 if ((avail_map & pset->realtime_map) == avail_map) {
2306 /* No more non-RT primary CPUs in this pset */
2307 atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2308 }
2309 } else {
2310 if ((avail_map & pset->realtime_map) != avail_map) {
2311 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
2312 atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2313 }
2314 }
2315 avail_map &= pset->primary_map;
2316 if ((avail_map & pset->realtime_map) != avail_map) {
2317 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
2318 atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2319 }
2320 }
2321 }
2322 }
2323
2324 static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills);
2325 static processor_t choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline,
2326 processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus);
2327 static processor_t choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries);
2328 #if defined(__x86_64__)
2329 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups);
2330 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups);
2331 #endif
2332 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup);
2333 static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
2334
2335 static bool
2336 other_psets_have_earlier_rt_threads_pending(processor_set_t stealing_pset, uint64_t earliest_deadline)
2337 {
2338 pset_map_t pset_map = stealing_pset->node->pset_map;
2339
2340 bit_clear(pset_map, stealing_pset->pset_id);
2341
2342 for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
2343 processor_set_t nset = pset_array[pset_id];
2344
2345 if (deadline_add(nset->stealable_rt_threads_earliest_deadline, rt_deadline_epsilon) < earliest_deadline) {
2346 return true;
2347 }
2348 }
2349
2350 return false;
2351 }
2352
2353 /*
2354 * starting_pset must be locked on entry; returns true if it was unlocked before return
2355 */
2356 static bool
2357 choose_next_rt_processor_for_IPI(processor_set_t starting_pset, processor_t chosen_processor, bool spill_ipi,
2358 processor_t *result_processor, sched_ipi_type_t *result_ipi_type)
2359 {
2360 bool starting_pset_is_unlocked = false;
2361 uint64_t earliest_deadline = rt_runq_earliest_deadline(starting_pset);
2362 int max_pri = rt_runq_priority(starting_pset);
2363 __kdebug_only uint64_t spill_tid = thread_tid(rt_runq_first(&starting_pset->rt_runq));
2364 processor_set_t pset = starting_pset;
2365 processor_t next_rt_processor = PROCESSOR_NULL;
2366 if (spill_ipi) {
2367 processor_set_t nset = next_pset(pset);
2368 assert(nset != starting_pset);
2369 pset = change_locked_pset(pset, nset);
2370 starting_pset_is_unlocked = true;
2371 }
2372 do {
2373 const bool consider_secondaries = true;
2374 next_rt_processor = choose_next_processor_for_realtime_thread(pset, max_pri, earliest_deadline, chosen_processor, consider_secondaries);
2375 if (next_rt_processor == PROCESSOR_NULL) {
2376 if (!spill_ipi) {
2377 break;
2378 }
2379 processor_set_t nset = next_pset(pset);
2380 if (nset == starting_pset) {
2381 break;
2382 }
2383 pset = change_locked_pset(pset, nset);
2384 starting_pset_is_unlocked = true;
2385 }
2386 } while (next_rt_processor == PROCESSOR_NULL);
2387 if (next_rt_processor) {
2388 if (pset != starting_pset) {
2389 if (bit_set_if_clear(pset->rt_pending_spill_cpu_mask, next_rt_processor->cpu_id)) {
2390 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_START,
2391 next_rt_processor->cpu_id, pset->rt_pending_spill_cpu_mask, starting_pset->cpu_set_low, (uintptr_t)spill_tid);
2392 }
2393 }
2394 *result_ipi_type = sched_ipi_action(next_rt_processor, NULL, SCHED_IPI_EVENT_RT_PREEMPT);
2395 *result_processor = next_rt_processor;
2396 }
2397 if (pset != starting_pset) {
2398 pset_unlock(pset);
2399 }
2400
2401 return starting_pset_is_unlocked;
2402 }
2403
2404 /*
2405 * backup processor - used by choose_processor to send a backup IPI to, in case the preferred processor can't immediately respond
2406 * followup processor - used in thread_select when there are still threads on the run queue and available processors
2407 * spill processor - a processor in a different processor set that is signalled to steal a thread from this run queue
2408 */
2409 typedef enum {
2410 none,
2411 backup,
2412 followup,
2413 spill
2414 } next_processor_type_t;
2415
2416 #undef LOOP_COUNT
2417 #ifdef LOOP_COUNT
2418 int max_loop_count[MAX_SCHED_CPUS] = { 0 };
2419 #endif
2420
2421 /*
2422 * thread_select:
2423 *
2424 * Select a new thread for the current processor to execute.
2425 *
2426 * May select the current thread, which must be locked.
2427 */
2428 static thread_t
2429 thread_select(thread_t thread,
2430 processor_t processor,
2431 ast_t *reason)
2432 {
2433 processor_set_t pset = processor->processor_set;
2434 thread_t new_thread = THREAD_NULL;
2435
2436 assert(processor == current_processor());
2437 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2438
2439 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_START,
2440 0, pset->pending_AST_URGENT_cpu_mask, 0, 0);
2441
2442 __kdebug_only int idle_reason = 0;
2443 __kdebug_only int delay_count = 0;
2444
2445 #if defined(__x86_64__)
2446 int timeout_count = sched_backup_cpu_timeout_count;
2447 if ((sched_avoid_cpu0 == 1) && (processor->cpu_id == 0)) {
2448 /* Prefer cpu0 as backup */
2449 timeout_count--;
2450 } else if ((sched_avoid_cpu0 == 2) && (processor->processor_primary != processor)) {
2451 /* Prefer secondary cpu as backup */
2452 timeout_count--;
2453 }
2454 #endif
2455 bool pending_AST_URGENT = false;
2456 bool pending_AST_PREEMPT = false;
2457
2458 #ifdef LOOP_COUNT
2459 int loop_count = -1;
2460 #endif
2461
2462 do {
2463 /*
2464 * Update the priority.
2465 */
2466 if (SCHED(can_update_priority)(thread)) {
2467 SCHED(update_priority)(thread);
2468 }
2469
2470 pset_lock(pset);
2471
2472 restart:
2473 #ifdef LOOP_COUNT
2474 loop_count++;
2475 if (loop_count > max_loop_count[processor->cpu_id]) {
2476 max_loop_count[processor->cpu_id] = loop_count;
2477 if (bit_count(loop_count) == 1) {
2478 kprintf("[%d]%s>max_loop_count = %d\n", processor->cpu_id, __FUNCTION__, loop_count);
2479 }
2480 }
2481 #endif
2482 pending_AST_URGENT = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
2483 pending_AST_PREEMPT = bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2484
2485 processor_state_update_from_thread(processor, thread, true);
2486
2487 idle_reason = 0;
2488
2489 processor_t ast_processor = PROCESSOR_NULL;
2490 processor_t next_rt_processor = PROCESSOR_NULL;
2491 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2492 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2493
2494 assert(processor->state != PROCESSOR_OFF_LINE);
2495
2496 /*
2497 * Bound threads are dispatched to a processor without going through
2498 * choose_processor(), so in those cases we must continue trying to dequeue work
2499 * as we are the only option.
2500 */
2501 if (!SCHED(processor_bound_count)(processor)) {
2502 if (!processor->is_recommended) {
2503 /*
2504 * The performance controller has provided a hint to not dispatch more threads.
2505 */
2506 idle_reason = 1;
2507 goto send_followup_ipi_before_idle;
2508 } else if (rt_runq_count(pset)) {
2509 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, false);
2510 /* Give the current RT thread a chance to complete */
2511 ok_to_run_realtime_thread |= (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice);
2512 #if defined(__x86_64__)
2513 /*
2514 * On Intel we want to avoid SMT secondary processors and processor 0
2515 * but allow them to be used as backup processors in case the preferred chosen
2516 * processor is delayed by interrupts or processor stalls. So if it is
2517 * not ok_to_run_realtime_thread as preferred (sched_ok_to_run_realtime_thread(pset, processor, as_backup=false))
2518 * but ok_to_run_realtime_thread as backup (sched_ok_to_run_realtime_thread(pset, processor, as_backup=true))
2519 * we delay up to (timeout_count * 10us) to give the preferred processor a chance
2520 * to grab the thread before the (current) backup processor does.
2521 *
2522 * timeout_count defaults to 5 but can be tuned using sysctl kern.sched_backup_cpu_timeout_count
2523 * on DEVELOPMENT || DEBUG kernels. It is also adjusted (see above) depending on whether we want to use
2524 * cpu0 before secondary cpus or not.
2525 */
2526 if (!ok_to_run_realtime_thread) {
2527 if (sched_ok_to_run_realtime_thread(pset, processor, true)) {
2528 if (timeout_count-- > 0) {
2529 pset_unlock(pset);
2530 thread_unlock(thread);
2531 delay(10);
2532 delay_count++;
2533 thread_lock(thread);
2534 pset_lock(pset);
2535 goto restart;
2536 }
2537 ok_to_run_realtime_thread = true;
2538 }
2539 }
2540 #endif
2541 if (!ok_to_run_realtime_thread) {
2542 idle_reason = 2;
2543 goto send_followup_ipi_before_idle;
2544 }
2545 } else if (processor->processor_primary != processor) {
2546 /*
2547 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
2548 * we should look for work only under the same conditions that choose_processor()
2549 * would have assigned work, which is when all primary processors have been assigned work.
2550 */
2551 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
2552 /* There are idle primaries */
2553 idle_reason = 3;
2554 goto idle;
2555 }
2556 }
2557 }
2558
2559 /*
2560 * Test to see if the current thread should continue
2561 * to run on this processor. Must not be attempting to wait, and not
2562 * bound to a different processor, nor be in the wrong
2563 * processor set, nor be forced to context switch by TH_SUSP.
2564 *
2565 * Note that there are never any RT threads in the regular runqueue.
2566 *
2567 * This code is exceptionally tricky.
2568 */
2569
2570 /* i.e. not waiting, not TH_SUSP'ed */
2571 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
2572
2573 /*
2574 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2575 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2576 * <rdar://problem/47907700>
2577 *
2578 * A yielding thread shouldn't be forced to context switch.
2579 */
2580
2581 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
2582
2583 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2584
2585 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2586
2587 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2588
2589 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);
2590
2591 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, true);
2592
2593 bool current_thread_can_keep_running = (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor);
2594 if (current_thread_can_keep_running) {
2595 /*
2596 * This thread is eligible to keep running on this processor.
2597 *
2598 * RT threads with un-expired quantum stay on processor,
2599 * unless there's a valid RT thread with an earlier deadline
2600 * and it is still ok_to_run_realtime_thread.
2601 */
2602 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2603 /*
2604 * Pick a new RT thread only if ok_to_run_realtime_thread
2605 * (but the current thread is allowed to complete).
2606 */
2607 if (ok_to_run_realtime_thread) {
2608 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2609 goto pick_new_rt_thread;
2610 }
2611 if (rt_runq_priority(pset) > thread->sched_pri) {
2612 if (sched_rt_runq_strict_priority) {
2613 /* The next RT thread is better, so pick it off the runqueue. */
2614 goto pick_new_rt_thread;
2615 }
2616
2617 /*
2618 * See if the current lower priority thread can continue to run without causing
2619 * the higher priority thread on the run queue to miss its deadline.
2620 */
2621 thread_t hi_thread = rt_runq_first(SCHED(rt_runq)(pset));
2622 if (thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon >= hi_thread->realtime.constraint) {
2623 /* The next RT thread is better, so pick it off the runqueue. */
2624 goto pick_new_rt_thread;
2625 }
2626 } else if ((rt_runq_count(pset) > 0) && (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < thread->realtime.deadline)) {
2627 /* The next RT thread is better, so pick it off the runqueue. */
2628 goto pick_new_rt_thread;
2629 }
2630 if (other_psets_have_earlier_rt_threads_pending(pset, thread->realtime.deadline)) {
2631 goto pick_new_rt_thread;
2632 }
2633 }
2634
2635 /* This is still the best RT thread to run. */
2636 processor->deadline = thread->realtime.deadline;
2637
2638 sched_update_pset_load_average(pset, 0);
2639
2640 clear_pending_AST_bits(pset, processor, 1);
2641
2642 next_rt_processor = PROCESSOR_NULL;
2643 next_rt_ipi_type = SCHED_IPI_NONE;
2644
2645 bool pset_unlocked = false;
2646 __kdebug_only next_processor_type_t nptype = none;
2647 if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
2648 nptype = spill;
2649 pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
2650 } else if (pset_needs_a_followup_IPI(pset)) {
2651 nptype = followup;
2652 pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
2653 }
2654 if (!pset_unlocked) {
2655 pset_unlock(pset);
2656 }
2657
2658 if (next_rt_processor) {
2659 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2660 next_rt_processor->cpu_id, next_rt_processor->state, nptype, 2);
2661 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2662 }
2663
2664 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2665 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 1);
2666 return thread;
2667 }
2668
2669 if ((rt_runq_count(pset) == 0) &&
2670 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2671 /* This thread is still the highest priority runnable (non-idle) thread */
2672 processor->deadline = RT_DEADLINE_NONE;
2673
2674 sched_update_pset_load_average(pset, 0);
2675
2676 clear_pending_AST_bits(pset, processor, 2);
2677
2678 pset_unlock(pset);
2679
2680 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2681 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 2);
2682 return thread;
2683 }
2684 } else {
2685 /*
2686 * This processor must context switch.
2687 * If it's due to a rebalance, we should aggressively find this thread a new home.
2688 */
2689 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2690 *reason |= AST_REBALANCE;
2691 }
2692 }
2693
2694 bool secondary_forced_idle = ((processor->processor_secondary != PROCESSOR_NULL) &&
2695 (thread_no_smt(thread) || (thread->sched_pri >= BASEPRI_RTQUEUES)) &&
2696 (processor->processor_secondary->state == PROCESSOR_IDLE));
2697
2698 /* OK, so we're not going to run the current thread. Look at the RT queue. */
2699 if (ok_to_run_realtime_thread) {
2700 pick_new_rt_thread:
2701 new_thread = sched_rt_choose_thread(pset);
2702 if (new_thread != THREAD_NULL) {
2703 processor->deadline = new_thread->realtime.deadline;
2704 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2705
2706 clear_pending_AST_bits(pset, processor, 3);
2707
2708 if (processor->processor_secondary != NULL) {
2709 processor_t sprocessor = processor->processor_secondary;
2710 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2711 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2712 ast_processor = sprocessor;
2713 }
2714 }
2715 }
2716 }
2717
2718 send_followup_ipi_before_idle:
2719 /* This might not have been cleared if we didn't call sched_rt_choose_thread() */
2720 if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2721 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 5);
2722 }
2723 __kdebug_only next_processor_type_t nptype = none;
2724 bool pset_unlocked = false;
2725 if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
2726 nptype = spill;
2727 pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
2728 } else if (pset_needs_a_followup_IPI(pset)) {
2729 nptype = followup;
2730 pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
2731 }
2732
2733 assert(new_thread || !ast_processor);
2734 if (new_thread || next_rt_processor) {
2735 if (!pset_unlocked) {
2736 pset_unlock(pset);
2737 pset_unlocked = true;
2738 }
2739 if (ast_processor == next_rt_processor) {
2740 ast_processor = PROCESSOR_NULL;
2741 ipi_type = SCHED_IPI_NONE;
2742 }
2743
2744 if (ast_processor) {
2745 sched_ipi_perform(ast_processor, ipi_type);
2746 }
2747
2748 if (next_rt_processor) {
2749 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2750 next_rt_processor->cpu_id, next_rt_processor->state, nptype, 3);
2751 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2752 }
2753
2754 if (new_thread) {
2755 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2756 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 3);
2757 return new_thread;
2758 }
2759 }
2760
2761 if (pset_unlocked) {
2762 pset_lock(pset);
2763 }
2764
2765 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2766 /* Things changed while we dropped the lock */
2767 goto restart;
2768 }
2769
2770 if (processor->is_recommended) {
2771 bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
2772 if (sched_ok_to_run_realtime_thread(pset, processor, true) && (spill_pending || rt_runq_count(pset))) {
2773 /* Things changed while we dropped the lock */
2774 goto restart;
2775 }
2776
2777 if ((processor->processor_primary != processor) && (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES)) {
2778 /* secondary can only run realtime thread */
2779 if (idle_reason == 0) {
2780 idle_reason = 4;
2781 }
2782 goto idle;
2783 }
2784 } else if (!SCHED(processor_bound_count)(processor)) {
2785 /* processor not recommended and no bound threads */
2786 if (idle_reason == 0) {
2787 idle_reason = 5;
2788 }
2789 goto idle;
2790 }
2791
2792 processor->deadline = RT_DEADLINE_NONE;
2793
2794 /* No RT threads, so let's look at the regular threads. */
2795 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
2796 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2797
2798 clear_pending_AST_bits(pset, processor, 4);
2799
2800 ast_processor = PROCESSOR_NULL;
2801 ipi_type = SCHED_IPI_NONE;
2802
2803 processor_t sprocessor = processor->processor_secondary;
2804 if (sprocessor != NULL) {
2805 if (sprocessor->state == PROCESSOR_RUNNING) {
2806 if (thread_no_smt(new_thread)) {
2807 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2808 ast_processor = sprocessor;
2809 }
2810 } else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
2811 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_PREEMPT);
2812 ast_processor = sprocessor;
2813 }
2814 }
2815 pset_unlock(pset);
2816
2817 if (ast_processor) {
2818 sched_ipi_perform(ast_processor, ipi_type);
2819 }
2820 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2821 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 4);
2822 return new_thread;
2823 }
2824
2825 if (processor->must_idle) {
2826 processor->must_idle = false;
2827 *reason |= AST_REBALANCE;
2828 idle_reason = 6;
2829 goto idle;
2830 }
2831
2832 if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) {
2833 /*
2834 * No runnable threads, attempt to steal
2835 * from other processors. Returns with pset lock dropped.
2836 */
2837
2838 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2839 pset_lock(pset);
2840 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2841 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2842 /*
2843 * A realtime thread chose this processor while it was DISPATCHING
2844 * and the pset lock was dropped.
2845 */
2846 ast_on(AST_URGENT | AST_PREEMPT);
2847 }
2848
2849 clear_pending_AST_bits(pset, processor, 5);
2850
2851 pset_unlock(pset);
2852
2853 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2854 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 5);
2855 return new_thread;
2856 }
2857
2858 /*
2859 * If other threads have appeared, shortcut
2860 * around again.
2861 */
2862 if (SCHED(processor_bound_count)(processor)) {
2863 continue;
2864 }
2865 if (processor->is_recommended) {
2866 if (!SCHED(processor_queue_empty)(processor) || (sched_ok_to_run_realtime_thread(pset, processor, true) && (rt_runq_count(pset) > 0))) {
2867 continue;
2868 }
2869 }
2870
2871 pset_lock(pset);
2872 }
2873
2874 idle:
2875 /* Someone selected this processor while we had dropped the lock */
2876 if ((!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) ||
2877 (!pending_AST_PREEMPT && bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id))) {
2878 goto restart;
2879 }
2880
2881 if ((idle_reason == 0) && current_thread_can_keep_running) {
2882 /* This thread is the only runnable (non-idle) thread */
2883 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2884 processor->deadline = thread->realtime.deadline;
2885 } else {
2886 processor->deadline = RT_DEADLINE_NONE;
2887 }
2888
2889 sched_update_pset_load_average(pset, 0);
2890
2891 clear_pending_AST_bits(pset, processor, 6);
2892
2893 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2894 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 6);
2895 pset_unlock(pset);
2896 return thread;
2897 }
2898
2899 /*
2900 * Nothing is runnable, or this processor must be forced idle,
2901 * so set this processor idle if it was running.
2902 */
2903 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
2904 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
2905 processor_state_update_idle(processor);
2906 }
2907 pset_update_rt_stealable_state(pset);
2908
2909 clear_pending_AST_bits(pset, processor, 7);
2910
2911 /* Invoked with pset locked, returns with pset unlocked */
2912 SCHED(processor_balance)(processor, pset);
2913
2914 new_thread = processor->idle_thread;
2915 } while (new_thread == THREAD_NULL);
2916
2917 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2918 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 10 + idle_reason);
2919 return new_thread;
2920 }
2921
2922 /*
2923 * thread_invoke
2924 *
2925 * Called at splsched with neither thread locked.
2926 *
2927 * Perform a context switch and start executing the new thread.
2928 *
2929 * Returns FALSE when the context switch didn't happen.
2930 * The reference to the new thread is still consumed.
2931 *
2932 * "self" is what is currently running on the processor,
2933 * "thread" is the new thread to context switch to
2934 * (which may be the same thread in some cases)
2935 */
2936 static boolean_t
2937 thread_invoke(
2938 thread_t self,
2939 thread_t thread,
2940 ast_t reason)
2941 {
2942 if (__improbable(get_preemption_level() != 0)) {
2943 int pl = get_preemption_level();
2944 panic("thread_invoke: preemption_level %d, possible cause: %s",
2945 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2946 "blocking while holding a spinlock, or within interrupt context"));
2947 }
2948
2949 thread_continue_t continuation = self->continuation;
2950 void *parameter = self->parameter;
2951
2952 struct recount_snap snap = { 0 };
2953 recount_snapshot(&snap);
2954 uint64_t ctime = snap.rsn_time_mach;
2955
2956 check_monotonic_time(ctime);
2957
2958 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2959 commpage_update_mach_approximate_time(ctime);
2960 #endif
2961
2962 if (ctime < thread->last_made_runnable_time) {
2963 panic("Non-monotonic time: invoke at 0x%llx, runnable at 0x%llx",
2964 ctime, thread->last_made_runnable_time);
2965 }
2966
2967 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2968 if (!((thread->state & TH_IDLE) != 0 ||
2969 ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
2970 sched_timeshare_consider_maintenance(ctime);
2971 }
2972 #endif
2973
2974 recount_log_switch_thread(&snap);
2975
2976 assert_thread_magic(self);
2977 assert(self == current_thread());
2978 assert(self->runq == PROCESSOR_NULL);
2979 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2980
2981 thread_lock(thread);
2982
2983 assert_thread_magic(thread);
2984 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
2985 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2986 assert(thread->runq == PROCESSOR_NULL);
2987
2988 /* Update SFI class based on other factors */
2989 thread->sfi_class = sfi_thread_classify(thread);
2990
2991 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2992 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2993 /*
2994 * In case a base_pri update happened between the timestamp and
2995 * taking the thread lock
2996 */
2997 if (ctime <= thread->last_basepri_change_time) {
2998 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2999 }
3000
3001 /* Allow realtime threads to hang onto a stack. */
3002 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
3003 self->reserved_stack = self->kernel_stack;
3004 }
3005
3006 /* Prepare for spin debugging */
3007 #if SCHED_HYGIENE_DEBUG
3008 ml_spin_debug_clear(thread);
3009 #endif
3010
3011 if (continuation != NULL) {
3012 if (!thread->kernel_stack) {
3013 /*
3014 * If we are using a privileged stack,
3015 * check to see whether we can exchange it with
3016 * that of the other thread.
3017 */
3018 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
3019 goto need_stack;
3020 }
3021
3022 /*
3023 * Context switch by performing a stack handoff.
3024 * Requires both threads to be parked in a continuation.
3025 */
3026 continuation = thread->continuation;
3027 parameter = thread->parameter;
3028
3029 processor_t processor = current_processor();
3030 processor->active_thread = thread;
3031 processor_state_update_from_thread(processor, thread, false);
3032
3033 if (thread->last_processor != processor && thread->last_processor != NULL) {
3034 if (thread->last_processor->processor_set != processor->processor_set) {
3035 thread->ps_switch++;
3036 }
3037 thread->p_switch++;
3038 }
3039 thread->last_processor = processor;
3040 thread->c_switch++;
3041 ast_context(thread);
3042
3043 thread_unlock(thread);
3044
3045 self->reason = reason;
3046
3047 processor->last_dispatch = ctime;
3048 self->last_run_time = ctime;
3049 timer_update(&thread->runnable_timer, ctime);
3050 recount_switch_thread(&snap, self, get_threadtask(self));
3051
3052 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3053 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
3054 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3055
3056 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
3057 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3058 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3059 }
3060
3061 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3062
3063 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3064
3065 #if KPERF
3066 kperf_off_cpu(self);
3067 #endif /* KPERF */
3068
3069 /*
3070 * This is where we actually switch thread identity,
3071 * and address space if required. However, register
3072 * state is not switched - this routine leaves the
3073 * stack and register state active on the current CPU.
3074 */
3075 TLOG(1, "thread_invoke: calling stack_handoff\n");
3076 stack_handoff(self, thread);
3077
3078 /* 'self' is now off core */
3079 assert(thread == current_thread_volatile());
3080
3081 DTRACE_SCHED(on__cpu);
3082
3083 #if KPERF
3084 kperf_on_cpu(thread, continuation, NULL);
3085 #endif /* KPERF */
3086
3087 thread_dispatch(self, thread);
3088
3089 #if KASAN
3090 /* Old thread's stack has been moved to the new thread, so explicitly
3091 * unpoison it. */
3092 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3093 #endif
3094
3095 thread->continuation = thread->parameter = NULL;
3096
3097 boolean_t enable_interrupts = TRUE;
3098
3099 /* idle thread needs to stay interrupts-disabled */
3100 if ((thread->state & TH_IDLE)) {
3101 enable_interrupts = FALSE;
3102 }
3103
3104 assert(continuation);
3105 call_continuation(continuation, parameter,
3106 thread->wait_result, enable_interrupts);
3107 /*NOTREACHED*/
3108 } else if (thread == self) {
3109 /* same thread but with continuation */
3110 ast_context(self);
3111
3112 thread_unlock(self);
3113
3114 #if KPERF
3115 kperf_on_cpu(thread, continuation, NULL);
3116 #endif /* KPERF */
3117
3118 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3119 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3120 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3121
3122 #if KASAN
3123 /* stack handoff to self - no thread_dispatch(), so clear the stack
3124 * and free the fakestack directly */
3125 #if KASAN_CLASSIC
3126 kasan_fakestack_drop(self);
3127 kasan_fakestack_gc(self);
3128 #endif /* KASAN_CLASSIC */
3129 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3130 #endif /* KASAN */
3131
3132 self->continuation = self->parameter = NULL;
3133
3134 boolean_t enable_interrupts = TRUE;
3135
3136 /* idle thread needs to stay interrupts-disabled */
3137 if ((self->state & TH_IDLE)) {
3138 enable_interrupts = FALSE;
3139 }
3140
3141 call_continuation(continuation, parameter,
3142 self->wait_result, enable_interrupts);
3143 /*NOTREACHED*/
3144 }
3145 } else {
3146 /*
3147 * Check that the other thread has a stack
3148 */
3149 if (!thread->kernel_stack) {
3150 need_stack:
3151 if (!stack_alloc_try(thread)) {
3152 thread_unlock(thread);
3153 thread_stack_enqueue(thread);
3154 return FALSE;
3155 }
3156 } else if (thread == self) {
3157 ast_context(self);
3158 thread_unlock(self);
3159
3160 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3161 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3162 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3163
3164 return TRUE;
3165 }
3166 }
3167
3168 /*
3169 * Context switch by full context save.
3170 */
3171 processor_t processor = current_processor();
3172 processor->active_thread = thread;
3173 processor_state_update_from_thread(processor, thread, false);
3174
3175 if (thread->last_processor != processor && thread->last_processor != NULL) {
3176 if (thread->last_processor->processor_set != processor->processor_set) {
3177 thread->ps_switch++;
3178 }
3179 thread->p_switch++;
3180 }
3181 thread->last_processor = processor;
3182 thread->c_switch++;
3183 ast_context(thread);
3184
3185 thread_unlock(thread);
3186
3187 self->reason = reason;
3188
3189 processor->last_dispatch = ctime;
3190 self->last_run_time = ctime;
3191 timer_update(&thread->runnable_timer, ctime);
3192 recount_switch_thread(&snap, self, get_threadtask(self));
3193
3194 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3195 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3196 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3197
3198 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
3199 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3200 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3201 }
3202
3203 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3204
3205 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3206
3207 #if KPERF
3208 kperf_off_cpu(self);
3209 #endif /* KPERF */
3210
3211 /*
3212 * This is where we actually switch register context,
3213 * and address space if required. We will next run
3214 * as a result of a subsequent context switch.
3215 *
3216 * Once registers are switched and the processor is running "thread",
3217 * the stack variables and non-volatile registers will contain whatever
3218 * was there the last time that thread blocked. No local variables should
3219 * be used after this point, except for the special case of "thread", which
3220 * the platform layer returns as the previous thread running on the processor
3221 * via the function call ABI as a return register, and "self", which may have
3222 * been stored on the stack or a non-volatile register, but a stale idea of
3223 * what was on the CPU is newly-accurate because that thread is again
3224 * running on the CPU.
3225 *
3226 * If one of the threads is using a continuation, thread_continue
3227 * is used to stitch up its context.
3228 *
3229 * If we are invoking a thread which is resuming from a continuation,
3230 * the CPU will invoke thread_continue next.
3231 *
3232 * If the current thread is parking in a continuation, then its state
3233 * won't be saved and the stack will be discarded. When the stack is
3234 * re-allocated, it will be configured to resume from thread_continue.
3235 */
3236
3237 assert(continuation == self->continuation);
3238 thread = machine_switch_context(self, continuation, thread);
3239 assert(self == current_thread_volatile());
3240 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
3241
3242 assert(continuation == NULL && self->continuation == NULL);
3243
3244 DTRACE_SCHED(on__cpu);
3245
3246 #if KPERF
3247 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
3248 #endif /* KPERF */
3249
3250 /* We have been resumed and are set to run. */
3251 thread_dispatch(thread, self);
3252
3253 return TRUE;
3254 }
3255
3256 #if defined(CONFIG_SCHED_DEFERRED_AST)
3257 /*
3258 * pset_cancel_deferred_dispatch:
3259 *
3260 * Cancels all ASTs that we can cancel for the given processor set
3261 * if the current processor is running the last runnable thread in the
3262 * system.
3263 *
3264 * This function assumes the current thread is runnable. This must
3265 * be called with the pset unlocked.
3266 */
3267 static void
3268 pset_cancel_deferred_dispatch(
3269 processor_set_t pset,
3270 processor_t processor)
3271 {
3272 processor_t active_processor = NULL;
3273 uint32_t sampled_sched_run_count;
3274
3275 pset_lock(pset);
3276 sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
3277
3278 /*
3279 * If we have emptied the run queue, and our current thread is runnable, we
3280 * should tell any processors that are still DISPATCHING that they will
3281 * probably not have any work to do. In the event that there are no
3282 * pending signals that we can cancel, this is also uninteresting.
3283 *
3284 * In the unlikely event that another thread becomes runnable while we are
3285 * doing this (sched_run_count is atomically updated, not guarded), the
3286 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
3287 * in order to dispatch it to a processor in our pset. So, the other
3288 * codepath will wait while we squash all cancelable ASTs, get the pset
3289 * lock, and then dispatch the freshly runnable thread. So this should be
3290 * correct (we won't accidentally have a runnable thread that hasn't been
3291 * dispatched to an idle processor), if not ideal (we may be restarting the
3292 * dispatch process, which could have some overhead).
3293 */
3294
3295 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
3296 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
3297 pset->pending_deferred_AST_cpu_mask &
3298 ~pset->pending_AST_URGENT_cpu_mask);
3299 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
3300 active_processor = processor_array[cpuid];
3301 /*
3302 * If a processor is DISPATCHING, it could be because of
3303 * a cancelable signal.
3304 *
3305 * IF the processor is not our
3306 * current processor (the current processor should not
3307 * be DISPATCHING, so this is a bit paranoid), AND there
3308 * is a cancelable signal pending on the processor, AND
3309 * there is no non-cancelable signal pending (as there is
3310 * no point trying to backtrack on bringing the processor
3311 * up if a signal we cannot cancel is outstanding), THEN
3312 * it should make sense to roll back the processor state
3313 * to the IDLE state.
3314 *
3315 * If the racy nature of this approach (as the signal
3316 * will be arbitrated by hardware, and can fire as we
3317 * roll back state) results in the core responding
3318 * despite being pushed back to the IDLE state, it
3319 * should be no different than if the core took some
3320 * interrupt while IDLE.
3321 */
3322 if (active_processor != processor) {
3323 /*
3324 * Squash all of the processor state back to some
3325 * reasonable facsimile of PROCESSOR_IDLE.
3326 */
3327
3328 processor_state_update_idle(active_processor);
3329 active_processor->deadline = RT_DEADLINE_NONE;
3330 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
3331 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3332 machine_signal_idle_cancel(active_processor);
3333 }
3334 }
3335 }
3336
3337 pset_unlock(pset);
3338 }
3339 #else
3340 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
3341 #endif
3342
3343 static void
3344 thread_csw_callout(
3345 thread_t old,
3346 thread_t new,
3347 uint64_t timestamp)
3348 {
3349 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
3350 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
3351 machine_switch_perfcontrol_context(event, timestamp, 0,
3352 same_pri_latency, old, new);
3353 }
3354
3355
3356 /*
3357 * thread_dispatch:
3358 *
3359 * Handle threads at context switch. Re-dispatch other thread
3360 * if still running, otherwise update run state and perform
3361 * special actions. Update quantum for other thread and begin
3362 * the quantum for ourselves.
3363 *
3364 * "thread" is the old thread that we have switched away from.
3365 * "self" is the new current thread that we have context switched to
3366 *
3367 * Called at splsched.
3368 *
3369 */
3370 void
3371 thread_dispatch(
3372 thread_t thread,
3373 thread_t self)
3374 {
3375 processor_t processor = self->last_processor;
3376 bool was_idle = false;
3377
3378 assert(processor == current_processor());
3379 assert(self == current_thread_volatile());
3380 assert(thread != self);
3381
3382 if (thread != THREAD_NULL) {
3383 /*
3384 * Do the perfcontrol callout for context switch.
3385 * The reason we do this here is:
3386 * - thread_dispatch() is called from various places that are not
3387 * the direct context switch path for eg. processor shutdown etc.
3388 * So adding the callout here covers all those cases.
3389 * - We want this callout as early as possible to be close
3390 * to the timestamp taken in thread_invoke()
3391 * - We want to avoid holding the thread lock while doing the
3392 * callout
3393 * - We do not want to callout if "thread" is NULL.
3394 */
3395 thread_csw_callout(thread, self, processor->last_dispatch);
3396
3397 #if KASAN
3398 if (thread->continuation != NULL) {
3399 /*
3400 * Thread has a continuation and the normal stack is going away.
3401 * Unpoison the stack and mark all fakestack objects as unused.
3402 */
3403 #if KASAN_CLASSIC
3404 kasan_fakestack_drop(thread);
3405 #endif /* KASAN_CLASSIC */
3406 if (thread->kernel_stack) {
3407 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3408 }
3409 }
3410
3411
3412 #if KASAN_CLASSIC
3413 /*
3414 * Free all unused fakestack objects.
3415 */
3416 kasan_fakestack_gc(thread);
3417 #endif /* KASAN_CLASSIC */
3418 #endif /* KASAN */
3419
3420 /*
3421 * If blocked at a continuation, discard
3422 * the stack.
3423 */
3424 if (thread->continuation != NULL && thread->kernel_stack != 0) {
3425 stack_free(thread);
3426 }
3427
3428 if (thread->state & TH_IDLE) {
3429 was_idle = true;
3430 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3431 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3432 (uintptr_t)thread_tid(thread), 0, thread->state,
3433 sched_run_buckets[TH_BUCKET_RUN], 0);
3434 } else {
3435 int64_t consumed;
3436 int64_t remainder = 0;
3437
3438 if (processor->quantum_end > processor->last_dispatch) {
3439 remainder = processor->quantum_end -
3440 processor->last_dispatch;
3441 }
3442
3443 consumed = thread->quantum_remaining - remainder;
3444
3445 if ((thread->reason & AST_LEDGER) == 0) {
3446 /*
3447 * Bill CPU time to both the task and
3448 * the individual thread.
3449 */
3450 ledger_credit_thread(thread, thread->t_ledger,
3451 task_ledgers.cpu_time, consumed);
3452 ledger_credit_thread(thread, thread->t_threadledger,
3453 thread_ledgers.cpu_time, consumed);
3454 if (thread->t_bankledger) {
3455 ledger_credit_thread(thread, thread->t_bankledger,
3456 bank_ledgers.cpu_time,
3457 (consumed - thread->t_deduct_bank_ledger_time));
3458 }
3459 thread->t_deduct_bank_ledger_time = 0;
3460 if (consumed > 0) {
3461 /*
3462 * This should never be negative, but in traces we are seeing some instances
3463 * of consumed being negative.
3464 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
3465 */
3466 sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
3467 }
3468 }
3469
3470 /* For the thread that we just context switched away from, figure
3471 * out if we have expired the wq quantum and set the AST if we have
3472 */
3473 if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
3474 thread_evaluate_workqueue_quantum_expiry(thread);
3475 }
3476
3477 /*
3478 * Pairs with task_restartable_ranges_synchronize
3479 */
3480 wake_lock(thread);
3481 thread_lock(thread);
3482
3483 /*
3484 * Same as ast_check(), in case we missed the IPI
3485 */
3486 thread_reset_pcs_ack_IPI(thread);
3487
3488 /*
3489 * Apply a priority floor if the thread holds a kernel resource
3490 * or explicitly requested it.
3491 * Do this before checking starting_pri to avoid overpenalizing
3492 * repeated rwlock blockers.
3493 */
3494 if (__improbable(thread->rwlock_count != 0)) {
3495 lck_rw_set_promotion_locked(thread);
3496 }
3497 if (__improbable(thread->priority_floor_count != 0)) {
3498 thread_floor_boost_set_promotion_locked(thread);
3499 }
3500
3501 boolean_t keep_quantum = processor->first_timeslice;
3502
3503 /*
3504 * Treat a thread which has dropped priority since it got on core
3505 * as having expired its quantum.
3506 */
3507 if (processor->starting_pri > thread->sched_pri) {
3508 keep_quantum = FALSE;
3509 }
3510
3511 /* Compute remainder of current quantum. */
3512 if (keep_quantum &&
3513 processor->quantum_end > processor->last_dispatch) {
3514 thread->quantum_remaining = (uint32_t)remainder;
3515 } else {
3516 thread->quantum_remaining = 0;
3517 }
3518
3519 if (thread->sched_mode == TH_MODE_REALTIME) {
3520 /*
3521 * Cancel the deadline if the thread has
3522 * consumed the entire quantum.
3523 */
3524 if (thread->quantum_remaining == 0) {
3525 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
3526 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
3527 thread->realtime.deadline = RT_DEADLINE_QUANTUM_EXPIRED;
3528 }
3529 } else {
3530 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3531 /*
3532 * For non-realtime threads treat a tiny
3533 * remaining quantum as an expired quantum
3534 * but include what's left next time.
3535 */
3536 if (thread->quantum_remaining < min_std_quantum) {
3537 thread->reason |= AST_QUANTUM;
3538 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
3539 }
3540 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3541 }
3542
3543 /*
3544 * If we are doing a direct handoff then
3545 * take the remainder of the quantum.
3546 */
3547 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
3548 self->quantum_remaining = thread->quantum_remaining;
3549 thread->reason |= AST_QUANTUM;
3550 thread->quantum_remaining = 0;
3551 } else {
3552 #if defined(CONFIG_SCHED_MULTIQ)
3553 if (SCHED(sched_groups_enabled) &&
3554 thread->sched_group == self->sched_group) {
3555 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3556 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
3557 self->reason, (uintptr_t)thread_tid(thread),
3558 self->quantum_remaining, thread->quantum_remaining, 0);
3559
3560 self->quantum_remaining = thread->quantum_remaining;
3561 thread->quantum_remaining = 0;
3562 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
3563 }
3564 #endif /* defined(CONFIG_SCHED_MULTIQ) */
3565 }
3566
3567 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
3568
3569 if (!(thread->state & TH_WAIT)) {
3570 /*
3571 * Still runnable.
3572 */
3573 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3574
3575 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
3576
3577 ast_t reason = thread->reason;
3578 sched_options_t options = SCHED_NONE;
3579
3580 if (reason & AST_REBALANCE) {
3581 options |= SCHED_REBALANCE;
3582 if (reason & AST_QUANTUM) {
3583 /*
3584 * Having gone to the trouble of forcing this thread off a less preferred core,
3585 * we should force the preferable core to reschedule immediately to give this
3586 * thread a chance to run instead of just sitting on the run queue where
3587 * it may just be stolen back by the idle core we just forced it off.
3588 * But only do this at the end of a quantum to prevent cascading effects.
3589 */
3590 options |= SCHED_PREEMPT;
3591 }
3592 }
3593
3594 if (reason & AST_QUANTUM) {
3595 options |= SCHED_TAILQ;
3596 } else if (reason & AST_PREEMPT) {
3597 options |= SCHED_HEADQ;
3598 } else {
3599 options |= (SCHED_PREEMPT | SCHED_TAILQ);
3600 }
3601
3602 thread_setrun(thread, options);
3603
3604 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3605 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3606 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
3607 sched_run_buckets[TH_BUCKET_RUN], 0);
3608
3609 if (thread->wake_active) {
3610 thread->wake_active = FALSE;
3611 thread_unlock(thread);
3612
3613 thread_wakeup(&thread->wake_active);
3614 } else {
3615 thread_unlock(thread);
3616 }
3617
3618 wake_unlock(thread);
3619 } else {
3620 /*
3621 * Waiting.
3622 */
3623 boolean_t should_terminate = FALSE;
3624 uint32_t new_run_count;
3625 int thread_state = thread->state;
3626
3627 /* Only the first call to thread_dispatch
3628 * after explicit termination should add
3629 * the thread to the termination queue
3630 */
3631 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
3632 should_terminate = TRUE;
3633 thread_state |= TH_TERMINATE2;
3634 }
3635
3636 timer_stop(&thread->runnable_timer, processor->last_dispatch);
3637
3638 thread_state &= ~TH_RUN;
3639 thread->state = thread_state;
3640
3641 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
3642 thread->chosen_processor = PROCESSOR_NULL;
3643
3644 new_run_count = SCHED(run_count_decr)(thread);
3645
3646 #if CONFIG_SCHED_AUTO_JOIN
3647 if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
3648 work_interval_auto_join_unwind(thread);
3649 }
3650 #endif /* CONFIG_SCHED_AUTO_JOIN */
3651
3652 #if CONFIG_SCHED_SFI
3653 if (thread->reason & AST_SFI) {
3654 thread->wait_sfi_begin_time = processor->last_dispatch;
3655 }
3656 #endif
3657 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
3658
3659 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3660 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3661 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
3662 new_run_count, 0);
3663
3664 if (thread_state & TH_WAIT_REPORT) {
3665 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
3666 }
3667
3668 if (thread->wake_active) {
3669 thread->wake_active = FALSE;
3670 thread_unlock(thread);
3671
3672 thread_wakeup(&thread->wake_active);
3673 } else {
3674 thread_unlock(thread);
3675 }
3676
3677 wake_unlock(thread);
3678
3679 if (should_terminate) {
3680 thread_terminate_enqueue(thread);
3681 }
3682 }
3683 }
3684 /*
3685 * The thread could have been added to the termination queue, so it's
3686 * unsafe to use after this point.
3687 */
3688 thread = THREAD_NULL;
3689 }
3690
3691 int urgency = THREAD_URGENCY_NONE;
3692 uint64_t latency = 0;
3693
3694 /* Update (new) current thread and reprogram running timers */
3695 thread_lock(self);
3696
3697 if (!(self->state & TH_IDLE)) {
3698 uint64_t arg1, arg2;
3699
3700 #if CONFIG_SCHED_SFI
3701 ast_t new_ast;
3702
3703 new_ast = sfi_thread_needs_ast(self, NULL);
3704
3705 if (new_ast != AST_NONE) {
3706 ast_on(new_ast);
3707 }
3708 #endif
3709
3710 if (processor->last_dispatch < self->last_made_runnable_time) {
3711 panic("Non-monotonic time: dispatch at 0x%llx, runnable at 0x%llx",
3712 processor->last_dispatch, self->last_made_runnable_time);
3713 }
3714
3715 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3716
3717 latency = processor->last_dispatch - self->last_made_runnable_time;
3718 assert(latency >= self->same_pri_latency);
3719
3720 urgency = thread_get_urgency(self, &arg1, &arg2);
3721
3722 thread_tell_urgency(urgency, arg1, arg2, latency, self);
3723
3724 /*
3725 * Start a new CPU limit interval if the previous one has
3726 * expired. This should happen before initializing a new
3727 * quantum.
3728 */
3729 if (cpulimit_affects_quantum &&
3730 thread_cpulimit_interval_has_expired(processor->last_dispatch)) {
3731 thread_cpulimit_restart(processor->last_dispatch);
3732 }
3733
3734 /*
3735 * Get a new quantum if none remaining.
3736 */
3737 if (self->quantum_remaining == 0) {
3738 thread_quantum_init(self, processor->last_dispatch);
3739 }
3740
3741 /*
3742 * Set up quantum timer and timeslice.
3743 */
3744 processor->quantum_end = processor->last_dispatch +
3745 self->quantum_remaining;
3746
3747 running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
3748 processor->quantum_end, processor->last_dispatch);
3749 if (was_idle) {
3750 /*
3751 * kperf's running timer is active whenever the idle thread for a
3752 * CPU is not running.
3753 */
3754 kperf_running_setup(processor, processor->last_dispatch);
3755 }
3756 running_timers_activate(processor);
3757 processor->first_timeslice = TRUE;
3758 } else {
3759 running_timers_deactivate(processor);
3760 processor->first_timeslice = FALSE;
3761 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
3762 }
3763
3764 assert(self->block_hint == kThreadWaitNone);
3765 self->computation_epoch = processor->last_dispatch;
3766 self->reason = AST_NONE;
3767 processor->starting_pri = self->sched_pri;
3768
3769 thread_unlock(self);
3770
3771 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
3772 processor->last_dispatch);
3773
3774 #if defined(CONFIG_SCHED_DEFERRED_AST)
3775 /*
3776 * TODO: Can we state that redispatching our old thread is also
3777 * uninteresting?
3778 */
3779 if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
3780 pset_cancel_deferred_dispatch(processor->processor_set, processor);
3781 }
3782 #endif
3783 }
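/*
 * Illustrative sketch (comment only, not part of the build): the quantum
 * accounting in thread_dispatch() reduces to simple arithmetic. Assuming,
 * hypothetically, that the outgoing thread came on core with
 * quantum_remaining equivalent to 10ms and the processor's quantum_end lies
 * 4ms past last_dispatch when it goes off core:
 *
 *     remainder = quantum_end - last_dispatch;     // 4ms still on the clock
 *     consumed  = quantum_remaining - remainder;   // 10ms - 4ms = 6ms billed to the ledgers
 *     quantum_remaining = keep_quantum ? remainder : 0;
 *
 * The concrete values are made up; only the relationships between
 * quantum_remaining, remainder and consumed are taken from the code above.
 */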
3784
3785 /*
3786 * thread_block_reason:
3787 *
3788 * Forces a reschedule, blocking the caller if a wait
3789 * has been asserted.
3790 *
3791 * If a continuation is specified, then thread_invoke will
3792 * attempt to discard the thread's kernel stack. When the
3793 * thread resumes, it will execute the continuation function
3794 * on a new kernel stack.
3795 */
3796 wait_result_t
3797 thread_block_reason(
3798 thread_continue_t continuation,
3799 void *parameter,
3800 ast_t reason)
3801 {
3802 thread_t self = current_thread();
3803 processor_t processor;
3804 thread_t new_thread;
3805 spl_t s;
3806
3807 s = splsched();
3808
3809 processor = current_processor();
3810
3811 /* If we're explicitly yielding, force a subsequent quantum */
3812 if (reason & AST_YIELD) {
3813 processor->first_timeslice = FALSE;
3814 }
3815
3816 /* We're handling all scheduling AST's */
3817 ast_off(AST_SCHEDULING);
3818
3819 #if PROC_REF_DEBUG
3820 if ((continuation != NULL) && (get_threadtask(self) != kernel_task)) {
3821 uthread_assert_zero_proc_refcount(get_bsdthread_info(self));
3822 }
3823 #endif
3824
3825 self->continuation = continuation;
3826 self->parameter = parameter;
3827
3828 if (self->state & ~(TH_RUN | TH_IDLE)) {
3829 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3830 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3831 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
3832 }
3833
3834 do {
3835 thread_lock(self);
3836 new_thread = thread_select(self, processor, &reason);
3837 thread_unlock(self);
3838 } while (!thread_invoke(self, new_thread, reason));
3839
3840 splx(s);
3841
3842 return self->wait_result;
3843 }
3844
3845 /*
3846 * thread_block:
3847 *
3848 * Block the current thread if a wait has been asserted.
3849 */
3850 wait_result_t
3851 thread_block(
3852 thread_continue_t continuation)
3853 {
3854 return thread_block_reason(continuation, NULL, AST_NONE);
3855 }
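/*
 * Typical caller-side sketch (comment only; the event name is hypothetical):
 * a waiter asserts a wait on a wait channel and then blocks, e.g.
 *
 *     assert_wait(&some_event, THREAD_UNINT);
 *     wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * thread_block() returns the wait_result_t recorded for the wait (for
 * example THREAD_AWAKENED) once the thread has been placed back on core.
 */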
3856
3857 wait_result_t
3858 thread_block_parameter(
3859 thread_continue_t continuation,
3860 void *parameter)
3861 {
3862 return thread_block_reason(continuation, parameter, AST_NONE);
3863 }
3864
3865 /*
3866 * thread_run:
3867 *
3868 * Switch directly from the current thread to the
3869 * new thread, handing off our quantum if appropriate.
3870 *
3871 * New thread must be runnable, and not on a run queue.
3872 *
3873 * Called at splsched.
3874 */
3875 int
3876 thread_run(
3877 thread_t self,
3878 thread_continue_t continuation,
3879 void *parameter,
3880 thread_t new_thread)
3881 {
3882 ast_t reason = AST_NONE;
3883
3884 if ((self->state & TH_IDLE) == 0) {
3885 reason = AST_HANDOFF;
3886 }
3887
3888 /*
3889 * If this thread hadn't been setrun'ed, it
3890 * might not have a chosen processor, so give it one
3891 */
3892 if (new_thread->chosen_processor == NULL) {
3893 new_thread->chosen_processor = current_processor();
3894 }
3895
3896 self->continuation = continuation;
3897 self->parameter = parameter;
3898
3899 while (!thread_invoke(self, new_thread, reason)) {
3900 /* the handoff failed, so we have to fall back to the normal block path */
3901 processor_t processor = current_processor();
3902
3903 reason = AST_NONE;
3904
3905 thread_lock(self);
3906 new_thread = thread_select(self, processor, &reason);
3907 thread_unlock(self);
3908 }
3909
3910 return self->wait_result;
3911 }
3912
3913 /*
3914 * thread_continue:
3915 *
3916 * Called at splsched when a thread first receives
3917 * a new stack after a continuation.
3918 *
3919 * Called with THREAD_NULL as the old thread when
3920 * invoked by machine_load_context.
3921 */
3922 void
3923 thread_continue(
3924 thread_t thread)
3925 {
3926 thread_t self = current_thread();
3927 thread_continue_t continuation;
3928 void *parameter;
3929
3930 DTRACE_SCHED(on__cpu);
3931
3932 continuation = self->continuation;
3933 parameter = self->parameter;
3934
3935 assert(continuation != NULL);
3936
3937 #if KPERF
3938 kperf_on_cpu(self, continuation, NULL);
3939 #endif
3940
3941 thread_dispatch(thread, self);
3942
3943 self->continuation = self->parameter = NULL;
3944
3945 #if SCHED_HYGIENE_DEBUG
3946 /* Reset interrupt-masked spin debugging timeout */
3947 ml_spin_debug_clear(self);
3948 #endif
3949
3950 TLOG(1, "thread_continue: calling call_continuation\n");
3951
3952 boolean_t enable_interrupts = TRUE;
3953
3954 /* bootstrap thread, idle thread need to stay interrupts-disabled */
3955 if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
3956 enable_interrupts = FALSE;
3957 }
3958
3959 #if KASAN_TBI
3960 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3961 #endif /* KASAN_TBI */
3962
3963
3964 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
3965 /*NOTREACHED*/
3966 }
3967
3968 void
3969 thread_quantum_init(thread_t thread, uint64_t now)
3970 {
3971 uint64_t new_quantum = 0;
3972
3973 switch (thread->sched_mode) {
3974 case TH_MODE_REALTIME:
3975 new_quantum = thread->realtime.computation;
3976 new_quantum = MIN(new_quantum, max_unsafe_rt_computation);
3977 break;
3978
3979 case TH_MODE_FIXED:
3980 new_quantum = SCHED(initial_quantum_size)(thread);
3981 new_quantum = MIN(new_quantum, max_unsafe_fixed_computation);
3982 break;
3983
3984 default:
3985 new_quantum = SCHED(initial_quantum_size)(thread);
3986 break;
3987 }
3988
3989 if (cpulimit_affects_quantum) {
3990 const uint64_t cpulimit_remaining = thread_cpulimit_remaining(now);
3991
3992 /*
3993 * If there's no remaining CPU time, the ledger system will
3994 * notice and put the thread to sleep.
3995 */
3996 if (cpulimit_remaining > 0) {
3997 new_quantum = MIN(new_quantum, cpulimit_remaining);
3998 }
3999 }
4000
4001 assert3u(new_quantum, <, UINT32_MAX);
4002 assert3u(new_quantum, >, 0);
4003
4004 thread->quantum_remaining = (uint32_t)new_quantum;
4005 }
4006
4007 uint32_t
4008 sched_timeshare_initial_quantum_size(thread_t thread)
4009 {
4010 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
4011 return bg_quantum;
4012 } else {
4013 return std_quantum;
4014 }
4015 }
4016
4017 /*
4018 * run_queue_init:
4019 *
4020 * Initialize a run queue before first use.
4021 */
4022 void
4023 run_queue_init(
4024 run_queue_t rq)
4025 {
4026 rq->highq = NOPRI;
4027 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
4028 rq->bitmap[i] = 0;
4029 }
4030 rq->urgency = rq->count = 0;
4031 for (int i = 0; i < NRQS; i++) {
4032 circle_queue_init(&rq->queues[i]);
4033 }
4034 }
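/*
 * Invariants maintained by the run_queue operations below (descriptive
 * comment only): bit i of rq->bitmap is set iff rq->queues[i] is non-empty,
 * rq->highq tracks the highest set bit (NOPRI when the run queue is empty),
 * rq->count is the total number of enqueued threads, and rq->urgency counts
 * the enqueued threads whose priority satisfies SCHED(priority_is_urgent)().
 */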
4035
4036 /*
4037 * run_queue_dequeue:
4038 *
4039 * Perform a dequeue operation on a run queue,
4040 * and return the resulting thread.
4041 *
4042 * The run queue must be locked (see thread_run_queue_remove()
4043 * for more info), and not empty.
4044 */
4045 thread_t
4046 run_queue_dequeue(
4047 run_queue_t rq,
4048 sched_options_t options)
4049 {
4050 thread_t thread;
4051 circle_queue_t queue = &rq->queues[rq->highq];
4052
4053 if (options & SCHED_HEADQ) {
4054 thread = cqe_dequeue_head(queue, struct thread, runq_links);
4055 } else {
4056 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
4057 }
4058
4059 assert(thread != THREAD_NULL);
4060 assert_thread_magic(thread);
4061
4062 thread->runq = PROCESSOR_NULL;
4063 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4064 rq->count--;
4065 if (SCHED(priority_is_urgent)(rq->highq)) {
4066 rq->urgency--; assert(rq->urgency >= 0);
4067 }
4068 if (circle_queue_empty(queue)) {
4069 bitmap_clear(rq->bitmap, rq->highq);
4070 rq->highq = bitmap_first(rq->bitmap, NRQS);
4071 }
4072
4073 return thread;
4074 }
4075
4076 /*
4077 * run_queue_enqueue:
4078 *
4079  *	Perform an enqueue operation on a run queue.
4080 *
4081 * The run queue must be locked (see thread_run_queue_remove()
4082 * for more info).
4083 */
4084 boolean_t
4085 run_queue_enqueue(
4086 run_queue_t rq,
4087 thread_t thread,
4088 sched_options_t options)
4089 {
4090 circle_queue_t queue = &rq->queues[thread->sched_pri];
4091 boolean_t result = FALSE;
4092
4093 assert_thread_magic(thread);
4094
4095 if (circle_queue_empty(queue)) {
4096 circle_enqueue_tail(queue, &thread->runq_links);
4097
4098 rq_bitmap_set(rq->bitmap, thread->sched_pri);
4099 if (thread->sched_pri > rq->highq) {
4100 rq->highq = thread->sched_pri;
4101 result = TRUE;
4102 }
4103 } else {
4104 if (options & SCHED_TAILQ) {
4105 circle_enqueue_tail(queue, &thread->runq_links);
4106 } else {
4107 circle_enqueue_head(queue, &thread->runq_links);
4108 }
4109 }
4110 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4111 rq->urgency++;
4112 }
4113 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4114 rq->count++;
4115
4116 return result;
4117 }
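/*
 * Note on the return value (descriptive comment only): run_queue_enqueue()
 * returns TRUE only when the new thread raised rq->highq, i.e. it is now the
 * highest-priority runnable thread on this run queue, which callers can use
 * as a cheap hint that a preemption check may be warranted. SCHED_HEADQ
 * places the thread ahead of same-priority peers, SCHED_TAILQ behind them.
 */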
4118
4119 /*
4120 * run_queue_remove:
4121 *
4122 * Remove a specific thread from a runqueue.
4123 *
4124 * The run queue must be locked.
4125 */
4126 void
4127 run_queue_remove(
4128 run_queue_t rq,
4129 thread_t thread)
4130 {
4131 circle_queue_t queue = &rq->queues[thread->sched_pri];
4132
4133 assert(thread->runq != PROCESSOR_NULL);
4134 assert_thread_magic(thread);
4135
4136 circle_dequeue(queue, &thread->runq_links);
4137 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4138 rq->count--;
4139 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4140 rq->urgency--; assert(rq->urgency >= 0);
4141 }
4142
4143 if (circle_queue_empty(queue)) {
4144 /* update run queue status */
4145 bitmap_clear(rq->bitmap, thread->sched_pri);
4146 rq->highq = bitmap_first(rq->bitmap, NRQS);
4147 }
4148
4149 thread->runq = PROCESSOR_NULL;
4150 }
4151
4152 /*
4153 * run_queue_peek
4154 *
4155 * Peek at the runq and return the highest
4156 * priority thread from the runq.
4157 *
4158 * The run queue must be locked.
4159 */
4160 thread_t
4161 run_queue_peek(
4162 run_queue_t rq)
4163 {
4164 if (rq->count > 0) {
4165 circle_queue_t queue = &rq->queues[rq->highq];
4166 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
4167 assert_thread_magic(thread);
4168 return thread;
4169 } else {
4170 return THREAD_NULL;
4171 }
4172 }
4173
4174 static bool
4175 rt_runq_enqueue(rt_queue_t rt_run_queue, thread_t thread, processor_t processor)
4176 {
4177 int pri = thread->sched_pri;
4178 assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4179 int i = pri - BASEPRI_RTQUEUES;
4180 rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4181 bitmap_t *map = rt_run_queue->bitmap;
4182
4183 bitmap_set(map, i);
4184
4185 queue_t queue = &rt_runq->pri_queue;
4186 uint64_t deadline = thread->realtime.deadline;
4187 bool preempt = false;
4188 bool earliest = false;
4189
4190 if (queue_empty(queue)) {
4191 enqueue_tail(queue, &thread->runq_links);
4192 preempt = true;
4193 earliest = true;
4194 rt_runq->pri_earliest_deadline = deadline;
4195 rt_runq->pri_constraint = thread->realtime.constraint;
4196 } else {
4197 /* Insert into rt_runq in thread deadline order */
4198 queue_entry_t iter;
4199 qe_foreach(iter, queue) {
4200 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
4201 assert_thread_magic(iter_thread);
4202
4203 if (deadline < iter_thread->realtime.deadline) {
4204 if (iter == queue_first(queue)) {
4205 preempt = true;
4206 earliest = true;
4207 rt_runq->pri_earliest_deadline = deadline;
4208 rt_runq->pri_constraint = thread->realtime.constraint;
4209 }
4210 insque(&thread->runq_links, queue_prev(iter));
4211 break;
4212 } else if (iter == queue_last(queue)) {
4213 enqueue_tail(queue, &thread->runq_links);
4214 break;
4215 }
4216 }
4217 }
4218 if (earliest && (deadline < os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed))) {
4219 os_atomic_store_wide(&rt_run_queue->earliest_deadline, deadline, relaxed);
4220 os_atomic_store(&rt_run_queue->constraint, thread->realtime.constraint, relaxed);
4221 os_atomic_store(&rt_run_queue->ed_index, pri - BASEPRI_RTQUEUES, relaxed);
4222 }
4223
4224 SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4225 rt_runq->pri_count++;
4226 os_atomic_inc(&rt_run_queue->count, relaxed);
4227
4228 thread->runq = processor;
4229
4230 CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4231
4232 return preempt;
4233 }
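/*
 * Illustrative sketch of the EDF ordering maintained above (the deadline
 * values are hypothetical): with two threads already queued at one priority,
 *
 *     pri_queue: [deadline=100] -> [deadline=250]
 *
 * enqueueing a thread with deadline=175 inserts it between the two, returns
 * preempt == false (it is not the new head), and leaves pri_earliest_deadline
 * at 100. Enqueueing deadline=50 instead lands at the head, updates
 * pri_earliest_deadline/pri_constraint, and reports preempt == true. The
 * pset-wide earliest_deadline/constraint/ed_index copies are refreshed only
 * when the new thread becomes the earliest in its priority band and also
 * beats the current pset-wide earliest deadline.
 */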
4234
4235 static thread_t
4236 rt_runq_dequeue(rt_queue_t rt_run_queue)
4237 {
4238 bitmap_t *map = rt_run_queue->bitmap;
4239 int i = bitmap_first(map, NRTQS);
4240 assert((i >= 0) && (i < NRTQS));
4241
4242 rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4243
4244 if (!sched_rt_runq_strict_priority) {
4245 int ed_index = os_atomic_load(&rt_run_queue->ed_index, relaxed);
4246 if (ed_index != i) {
4247 assert((ed_index >= 0) && (ed_index < NRTQS));
4248 rt_queue_pri_t *ed_runq = &rt_run_queue->rt_queue_pri[ed_index];
4249
4250 thread_t ed_thread = qe_queue_first(&ed_runq->pri_queue, struct thread, runq_links);
4251 thread_t hi_thread = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4252
4253 if (ed_thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon < hi_thread->realtime.constraint) {
4254 /* choose the earliest deadline thread */
4255 rt_runq = ed_runq;
4256 i = ed_index;
4257 }
4258 }
4259 }
4260
4261 assert(rt_runq->pri_count > 0);
4262 uint64_t earliest_deadline = RT_DEADLINE_NONE;
4263 uint32_t constraint = RT_CONSTRAINT_NONE;
4264 int ed_index = NOPRI;
4265 thread_t new_thread = qe_dequeue_head(&rt_runq->pri_queue, struct thread, runq_links);
4266 SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4267 if (--rt_runq->pri_count > 0) {
4268 thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4269 assert(next_rt != THREAD_NULL);
4270 earliest_deadline = next_rt->realtime.deadline;
4271 constraint = next_rt->realtime.constraint;
4272 ed_index = i;
4273 } else {
4274 bitmap_clear(map, i);
4275 }
4276 rt_runq->pri_earliest_deadline = earliest_deadline;
4277 rt_runq->pri_constraint = constraint;
4278
4279 for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4280 rt_runq = &rt_run_queue->rt_queue_pri[i];
4281 if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4282 earliest_deadline = rt_runq->pri_earliest_deadline;
4283 constraint = rt_runq->pri_constraint;
4284 ed_index = i;
4285 }
4286 }
4287 os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4288 os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4289 os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4290 os_atomic_dec(&rt_run_queue->count, relaxed);
4291
4292 new_thread->runq = PROCESSOR_NULL;
4293
4294 CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4295
4296 return new_thread;
4297 }
4298
4299 static thread_t
4300 rt_runq_first(rt_queue_t rt_run_queue)
4301 {
4302 bitmap_t *map = rt_run_queue->bitmap;
4303 int i = bitmap_first(map, NRTQS);
4304 if (i < 0) {
4305 return THREAD_NULL;
4306 }
4307 rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4308 thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4309
4310 return next_rt;
4311 }
4312
4313 static void
4314 rt_runq_remove(rt_queue_t rt_run_queue, thread_t thread)
4315 {
4316 CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4317
4318 int pri = thread->sched_pri;
4319 assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4320 int i = pri - BASEPRI_RTQUEUES;
4321 rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4322 bitmap_t *map = rt_run_queue->bitmap;
4323
4324 assert(rt_runq->pri_count > 0);
4325 uint64_t earliest_deadline = RT_DEADLINE_NONE;
4326 uint32_t constraint = RT_CONSTRAINT_NONE;
4327 int ed_index = NOPRI;
4328 remqueue(&thread->runq_links);
4329 SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4330 if (--rt_runq->pri_count > 0) {
4331 thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4332 earliest_deadline = next_rt->realtime.deadline;
4333 constraint = next_rt->realtime.constraint;
4334 ed_index = i;
4335 } else {
4336 bitmap_clear(map, i);
4337 }
4338 rt_runq->pri_earliest_deadline = earliest_deadline;
4339 rt_runq->pri_constraint = constraint;
4340
4341 for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4342 rt_runq = &rt_run_queue->rt_queue_pri[i];
4343 if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4344 earliest_deadline = rt_runq->pri_earliest_deadline;
4345 constraint = rt_runq->pri_constraint;
4346 ed_index = i;
4347 }
4348 }
4349 os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4350 os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4351 os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4352 os_atomic_dec(&rt_run_queue->count, relaxed);
4353
4354 thread->runq = PROCESSOR_NULL;
4355
4356 CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4357 }
4358
4359 rt_queue_t
4360 sched_rtlocal_runq(processor_set_t pset)
4361 {
4362 return &pset->rt_runq;
4363 }
4364
4365 void
4366 sched_rtlocal_init(processor_set_t pset)
4367 {
4368 pset_rt_init(pset);
4369 }
4370
4371 void
4372 sched_rtlocal_queue_shutdown(processor_t processor)
4373 {
4374 processor_set_t pset = processor->processor_set;
4375 thread_t thread;
4376 queue_head_t tqueue;
4377
4378 pset_lock(pset);
4379
4380 /* We only need to migrate threads if this is the last active or last recommended processor in the pset */
4381 if (bit_count(pset_available_cpumap(pset)) > 0) {
4382 pset_unlock(pset);
4383 return;
4384 }
4385
4386 queue_init(&tqueue);
4387
4388 while (rt_runq_count(pset) > 0) {
4389 thread = rt_runq_dequeue(&pset->rt_runq);
4390 enqueue_tail(&tqueue, &thread->runq_links);
4391 }
4392 sched_update_pset_load_average(pset, 0);
4393 pset_update_rt_stealable_state(pset);
4394 pset_unlock(pset);
4395
4396 qe_foreach_element_safe(thread, &tqueue, runq_links) {
4397 remqueue(&thread->runq_links);
4398
4399 thread_lock(thread);
4400
4401 thread_setrun(thread, SCHED_TAILQ);
4402
4403 thread_unlock(thread);
4404 }
4405 }
4406
4407 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
4408 void
4409 sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
4410 {
4411 thread_t thread;
4412
4413 pset_node_t node = &pset_node0;
4414 processor_set_t pset = node->psets;
4415
4416 spl_t s = splsched();
4417 do {
4418 while (pset != NULL) {
4419 pset_lock(pset);
4420
4421 bitmap_t *map = pset->rt_runq.bitmap;
4422 for (int i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4423 rt_queue_pri_t *rt_runq = &pset->rt_runq.rt_queue_pri[i];
4424
4425 qe_foreach_element_safe(thread, &rt_runq->pri_queue, runq_links) {
4426 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
4427 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
4428 }
4429 }
4430 }
4431
4432 pset_unlock(pset);
4433
4434 pset = pset->pset_list;
4435 }
4436 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4437 splx(s);
4438 }
4439
4440 int64_t
4441 sched_rtlocal_runq_count_sum(void)
4442 {
4443 pset_node_t node = &pset_node0;
4444 processor_set_t pset = node->psets;
4445 int64_t count = 0;
4446
4447 do {
4448 while (pset != NULL) {
4449 count += pset->rt_runq.runq_stats.count_sum;
4450
4451 pset = pset->pset_list;
4452 }
4453 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4454
4455 return count;
4456 }
4457
4458 /*
4459 * Called with stealing_pset locked and
4460 * returns with stealing_pset locked
4461 * but the lock will have been dropped
4462 * if a thread is returned.
4463 */
4464 thread_t
4465 sched_rtlocal_steal_thread(processor_set_t stealing_pset, uint64_t earliest_deadline)
4466 {
4467 if (!sched_allow_rt_steal) {
4468 return THREAD_NULL;
4469 }
4470 pset_map_t pset_map = stealing_pset->node->pset_map;
4471
4472 bit_clear(pset_map, stealing_pset->pset_id);
4473
4474 processor_set_t pset = stealing_pset;
4475
4476 processor_set_t target_pset;
4477 uint64_t target_deadline;
4478
4479 retry:
4480 target_pset = NULL;
4481 target_deadline = earliest_deadline - rt_deadline_epsilon;
4482
4483 for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
4484 processor_set_t nset = pset_array[pset_id];
4485
4486 /*
4487 * During startup, while pset_array[] and node->pset_map are still being initialized,
4488 * the update to pset_map may become visible to this cpu before the update to pset_array[].
4489 * It would be good to avoid inserting a memory barrier here that is only needed during startup,
4490 * so just check nset is not NULL instead.
4491 */
4492 if (nset && (nset->stealable_rt_threads_earliest_deadline < target_deadline)) {
4493 target_deadline = nset->stealable_rt_threads_earliest_deadline;
4494 target_pset = nset;
4495 }
4496 }
4497
4498 if (target_pset != NULL) {
4499 pset = change_locked_pset(pset, target_pset);
4500 if (pset->stealable_rt_threads_earliest_deadline <= target_deadline) {
4501 thread_t new_thread = rt_runq_dequeue(&pset->rt_runq);
4502 pset_update_rt_stealable_state(pset);
4503 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_STEAL) | DBG_FUNC_NONE, (uintptr_t)thread_tid(new_thread), pset->pset_id, pset->cpu_set_low, 0);
4504
4505 pset = change_locked_pset(pset, stealing_pset);
4506 return new_thread;
4507 }
4508 pset = change_locked_pset(pset, stealing_pset);
4509 earliest_deadline = rt_runq_earliest_deadline(pset);
4510 goto retry;
4511 }
4512
4513 pset = change_locked_pset(pset, stealing_pset);
4514 return THREAD_NULL;
4515 }
4516
4517 /*
4518 * pset is locked
4519 */
4520 thread_t
4521 sched_rt_choose_thread(processor_set_t pset)
4522 {
4523 processor_t processor = current_processor();
4524
4525 if (SCHED(steal_thread_enabled)(pset)) {
4526 do {
4527 bool spill_pending = bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
4528 if (spill_pending) {
4529 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 2);
4530 }
4531 thread_t new_thread = SCHED(rt_steal_thread)(pset, rt_runq_earliest_deadline(pset));
4532 if (new_thread != THREAD_NULL) {
4533 if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
4534 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 3);
4535 }
4536 return new_thread;
4537 }
4538 } while (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id));
4539 }
4540
4541 if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
4542 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 4);
4543 }
4544
4545 if (rt_runq_count(pset) > 0) {
4546 thread_t new_thread = rt_runq_dequeue(SCHED(rt_runq)(pset));
4547 assert(new_thread != THREAD_NULL);
4548 pset_update_rt_stealable_state(pset);
4549 return new_thread;
4550 }
4551
4552 return THREAD_NULL;
4553 }
4554
4555 /*
4556 * realtime_queue_insert:
4557 *
4558 * Enqueue a thread for realtime execution.
4559 */
4560 static bool
4561 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
4562 {
4563 pset_assert_locked(pset);
4564
4565 bool preempt = rt_runq_enqueue(SCHED(rt_runq)(pset), thread, processor);
4566 pset_update_rt_stealable_state(pset);
4567
4568 return preempt;
4569 }
4570
4571 /*
4572 * realtime_setrun:
4573 *
4574 * Dispatch a thread for realtime execution.
4575 *
4576 * Thread must be locked. Associated pset must
4577 * be locked, and is returned unlocked.
4578 */
4579 static void
4580 realtime_setrun(
4581 processor_t chosen_processor,
4582 thread_t thread)
4583 {
4584 processor_set_t pset = chosen_processor->processor_set;
4585 pset_assert_locked(pset);
4586 bool pset_is_locked = true;
4587
4588 int n_backup = 0;
4589
4590 if (thread->realtime.constraint <= rt_constraint_threshold) {
4591 n_backup = sched_rt_n_backup_processors;
4592 }
4593 assert((n_backup >= 0) && (n_backup <= SCHED_MAX_BACKUP_PROCESSORS));
4594
4595 int existing_backups = bit_count(pset->pending_AST_URGENT_cpu_mask) - rt_runq_count(pset);
4596 if (existing_backups > 0) {
4597 n_backup = n_backup - existing_backups;
4598 if (n_backup < 0) {
4599 n_backup = 0;
4600 }
4601 }
4602
4603 sched_ipi_type_t ipi_type[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4604 processor_t ipi_processor[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4605
4606 thread->chosen_processor = chosen_processor;
4607
4608 /* <rdar://problem/15102234> */
4609 assert(thread->bound_processor == PROCESSOR_NULL);
4610
4611 realtime_queue_insert(chosen_processor, pset, thread);
4612
4613 processor_t processor = chosen_processor;
4614
4615 int count = 0;
4616 for (int i = 0; i <= n_backup; i++) {
4617 if (i == 0) {
4618 ipi_type[i] = SCHED_IPI_NONE;
4619 ipi_processor[i] = processor;
4620 count++;
4621
4622 ast_t preempt = AST_NONE;
4623 if (thread->sched_pri > processor->current_pri) {
4624 preempt = (AST_PREEMPT | AST_URGENT);
4625 } else if (thread->sched_pri == processor->current_pri) {
4626 if (deadline_add(thread->realtime.deadline, rt_deadline_epsilon) < processor->deadline) {
4627 preempt = (AST_PREEMPT | AST_URGENT);
4628 }
4629 }
4630
4631 if (preempt != AST_NONE) {
4632 if (processor->state == PROCESSOR_IDLE) {
4633 if (processor == current_processor()) {
4634 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
4635 ast_on(preempt);
4636
4637 if ((preempt & AST_URGENT) == AST_URGENT) {
4638 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4639 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4640 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 1);
4641 }
4642 }
4643
4644 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4645 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4646 }
4647 } else {
4648 ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4649 }
4650 } else if (processor->state == PROCESSOR_DISPATCHING) {
4651 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4652 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4653 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 2);
4654 }
4655 } else {
4656 if (processor == current_processor()) {
4657 ast_on(preempt);
4658
4659 if ((preempt & AST_URGENT) == AST_URGENT) {
4660 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4661 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4662 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 3);
4663 }
4664 }
4665
4666 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4667 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4668 }
4669 } else {
4670 ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4671 }
4672 }
4673 } else {
4674 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
4675 }
4676 } else {
4677 if (!pset_is_locked) {
4678 pset_lock(pset);
4679 }
4680 ipi_type[i] = SCHED_IPI_NONE;
4681 ipi_processor[i] = PROCESSOR_NULL;
4682 pset_is_locked = !choose_next_rt_processor_for_IPI(pset, chosen_processor, false, &ipi_processor[i], &ipi_type[i]);
4683 if (ipi_processor[i] == PROCESSOR_NULL) {
4684 break;
4685 }
4686 count++;
4687
4688 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
4689 			    ipi_processor[i]->cpu_id, ipi_processor[i]->state, i, 1);
4690 #if defined(__x86_64__)
4691 #define p_is_good(p) (((p)->processor_primary == (p)) && ((sched_avoid_cpu0 != 1) || ((p)->cpu_id != 0)))
4692 if (n_backup == SCHED_DEFAULT_BACKUP_PROCESSORS_SMT) {
4693 processor_t p0 = ipi_processor[0];
4694 processor_t p1 = ipi_processor[1];
4695 assert(p0 && p1);
4696 if (p_is_good(p0) && p_is_good(p1)) {
4697 /*
4698 * Both the chosen processor and the first backup are non-cpu0 primaries,
4699 * so there is no need for a 2nd backup processor.
4700 */
4701 break;
4702 }
4703 }
4704 #endif
4705 }
4706 }
4707
4708 if (pset_is_locked) {
4709 pset_unlock(pset);
4710 }
4711
4712 assert((count > 0) && (count <= (n_backup + 1)));
4713 for (int i = 0; i < count; i++) {
4714 assert(ipi_processor[i] != PROCESSOR_NULL);
4715 sched_ipi_perform(ipi_processor[i], ipi_type[i]);
4716 }
4717 }
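/*
 * Summary of the backup-processor scheme above (descriptive comment only):
 * when a realtime thread's constraint is at or below rt_constraint_threshold,
 * up to sched_rt_n_backup_processors additional CPUs are signalled besides
 * the chosen one, less any CPUs that already have urgent ASTs pending.
 * Whichever signalled CPU reaches the realtime run queue first dequeues the
 * thread; the rest find no realtime work and return to their previous state,
 * trading a few redundant wakeups for lower worst-case dispatch latency.
 */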
4718
4719
4720 sched_ipi_type_t
4721 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
4722 thread_t thread, __unused sched_ipi_event_t event)
4723 {
4724 #if defined(CONFIG_SCHED_DEFERRED_AST)
4725 #if CONFIG_THREAD_GROUPS
4726 if (thread) {
4727 struct thread_group *tg = thread_group_get(thread);
4728 if (thread_group_uses_immediate_ipi(tg)) {
4729 return SCHED_IPI_IMMEDIATE;
4730 }
4731 }
4732 #endif /* CONFIG_THREAD_GROUPS */
4733 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
4734 return SCHED_IPI_DEFERRED;
4735 }
4736 #else /* CONFIG_SCHED_DEFERRED_AST */
4737 (void) thread;
4738 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
4739 #endif /* CONFIG_SCHED_DEFERRED_AST */
4740 return SCHED_IPI_NONE;
4741 }
4742
4743 sched_ipi_type_t
4744 sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
4745 {
4746 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4747 assert(dst != NULL);
4748
4749 processor_set_t pset = dst->processor_set;
4750 if (current_processor() == dst) {
4751 return SCHED_IPI_NONE;
4752 }
4753
4754 bool dst_idle = (dst->state == PROCESSOR_IDLE);
4755 if (dst_idle) {
4756 pset_update_processor_state(pset, dst, PROCESSOR_DISPATCHING);
4757 }
4758
4759 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
4760 switch (ipi_type) {
4761 case SCHED_IPI_NONE:
4762 return SCHED_IPI_NONE;
4763 #if defined(CONFIG_SCHED_DEFERRED_AST)
4764 case SCHED_IPI_DEFERRED:
4765 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
4766 break;
4767 #endif /* CONFIG_SCHED_DEFERRED_AST */
4768 default:
4769 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
4770 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4771 dst->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 4);
4772 }
4773 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
4774 break;
4775 }
4776 return ipi_type;
4777 }
4778
4779 sched_ipi_type_t
4780 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
4781 {
4782 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4783 boolean_t deferred_ipi_supported = false;
4784 processor_set_t pset = dst->processor_set;
4785
4786 #if defined(CONFIG_SCHED_DEFERRED_AST)
4787 deferred_ipi_supported = true;
4788 #endif /* CONFIG_SCHED_DEFERRED_AST */
4789
4790 switch (event) {
4791 case SCHED_IPI_EVENT_SPILL:
4792 case SCHED_IPI_EVENT_SMT_REBAL:
4793 case SCHED_IPI_EVENT_REBALANCE:
4794 case SCHED_IPI_EVENT_BOUND_THR:
4795 case SCHED_IPI_EVENT_RT_PREEMPT:
4796 /*
4797 * The RT preempt, spill, SMT rebalance, rebalance and the bound thread
4798 * scenarios use immediate IPIs always.
4799 */
4800 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4801 break;
4802 case SCHED_IPI_EVENT_PREEMPT:
4803 /* In the preemption case, use immediate IPIs for RT threads */
4804 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
4805 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4806 break;
4807 }
4808
4809 /*
4810 		 * For non-RT thread preemption:
4811 		 * If the core is active, use immediate IPIs.
4812 		 * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI.
4813 */
4814 if (deferred_ipi_supported && dst_idle) {
4815 return sched_ipi_deferred_policy(pset, dst, thread, event);
4816 }
4817 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4818 break;
4819 default:
4820 panic("Unrecognized scheduler IPI event type %d", event);
4821 }
4822 assert(ipi_type != SCHED_IPI_NONE);
4823 return ipi_type;
4824 }
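/*
 * Decision summary for sched_ipi_policy() (descriptive comment only):
 *  - SPILL, SMT_REBAL, REBALANCE, BOUND_THR and RT_PREEMPT events always use
 *    an immediate-class IPI: SCHED_IPI_IDLE if the target is idle, otherwise
 *    SCHED_IPI_IMMEDIATE.
 *  - PREEMPT for a realtime thread behaves the same way.
 *  - PREEMPT for a non-realtime thread onto an idle target takes the deferred
 *    path when CONFIG_SCHED_DEFERRED_AST is built in (subject to
 *    sched_ipi_deferred_policy(), which may still pick an immediate IPI);
 *    otherwise it falls back to SCHED_IPI_IDLE/SCHED_IPI_IMMEDIATE.
 */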
4825
4826 void
4827 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
4828 {
4829 switch (ipi) {
4830 case SCHED_IPI_NONE:
4831 break;
4832 case SCHED_IPI_IDLE:
4833 machine_signal_idle(dst);
4834 break;
4835 case SCHED_IPI_IMMEDIATE:
4836 cause_ast_check(dst);
4837 break;
4838 case SCHED_IPI_DEFERRED:
4839 machine_signal_idle_deferred(dst);
4840 break;
4841 default:
4842 panic("Unrecognized scheduler IPI type: %d", ipi);
4843 }
4844 }
4845
4846 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4847
4848 boolean_t
4849 priority_is_urgent(int priority)
4850 {
4851 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
4852 }
4853
4854 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
4855
4856 /*
4857 * processor_setrun:
4858 *
4859 * Dispatch a thread for execution on a
4860 * processor.
4861 *
4862 * Thread must be locked. Associated pset must
4863 * be locked, and is returned unlocked.
4864 */
4865 static void
4866 processor_setrun(
4867 processor_t processor,
4868 thread_t thread,
4869 integer_t options)
4870 {
4871 processor_set_t pset = processor->processor_set;
4872 pset_assert_locked(pset);
4873 ast_t preempt;
4874 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
4875
4876 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4877
4878 thread->chosen_processor = processor;
4879
4880 /*
4881 * Set preemption mode.
4882 */
4883 #if defined(CONFIG_SCHED_DEFERRED_AST)
4884 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
4885 #endif
4886 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
4887 preempt = (AST_PREEMPT | AST_URGENT);
4888 } else if (processor->current_is_eagerpreempt) {
4889 preempt = (AST_PREEMPT | AST_URGENT);
4890 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
4891 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
4892 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
4893 } else {
4894 preempt = AST_NONE;
4895 }
4896 } else {
4897 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
4898 }
4899
4900 if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
4901 /*
4902 * Having gone to the trouble of forcing this thread off a less preferred core,
4903 * we should force the preferable core to reschedule immediately to give this
4904 * thread a chance to run instead of just sitting on the run queue where
4905 * it may just be stolen back by the idle core we just forced it off.
4906 */
4907 preempt |= AST_PREEMPT;
4908 }
4909
4910 SCHED(processor_enqueue)(processor, thread, options);
4911 sched_update_pset_load_average(pset, 0);
4912
4913 if (preempt != AST_NONE) {
4914 if (processor->state == PROCESSOR_IDLE) {
4915 ipi_action = eExitIdle;
4916 } else if (processor->state == PROCESSOR_DISPATCHING) {
4917 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4918 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4919 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 5);
4920 }
4921 } else if ((processor->state == PROCESSOR_RUNNING ||
4922 processor->state == PROCESSOR_SHUTDOWN) &&
4923 (thread->sched_pri >= processor->current_pri)) {
4924 ipi_action = eInterruptRunning;
4925 }
4926 } else {
4927 /*
4928 * New thread is not important enough to preempt what is running, but
4929 * special processor states may need special handling
4930 */
4931 if (processor->state == PROCESSOR_SHUTDOWN &&
4932 thread->sched_pri >= processor->current_pri) {
4933 ipi_action = eInterruptRunning;
4934 } else if (processor->state == PROCESSOR_IDLE) {
4935 ipi_action = eExitIdle;
4936 } else if (processor->state == PROCESSOR_DISPATCHING) {
4937 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4938 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4939 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 6);
4940 }
4941 }
4942 }
4943
4944 if (ipi_action != eDoNothing) {
4945 if (processor == current_processor()) {
4946 if (ipi_action == eExitIdle) {
4947 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
4948 }
4949 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
4950 ast_on(preempt);
4951 }
4952
4953 if ((preempt & AST_URGENT) == AST_URGENT) {
4954 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4955 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4956 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 7);
4957 }
4958 } else {
4959 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4960 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 7);
4961 }
4962 }
4963
4964 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4965 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4966 } else {
4967 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4968 }
4969 } else {
4970 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
4971 ipi_type = sched_ipi_action(processor, thread, event);
4972 }
4973 }
4974 pset_unlock(pset);
4975 sched_ipi_perform(processor, ipi_type);
4976 }
4977
4978 /*
4979 * choose_next_pset:
4980 *
4981 * Return the next sibling pset containing
4982 * available processors.
4983 *
4984 * Returns the original pset if none other is
4985 * suitable.
4986 */
4987 static processor_set_t
4988 choose_next_pset(
4989 processor_set_t pset)
4990 {
4991 processor_set_t nset = pset;
4992
4993 do {
4994 nset = next_pset(nset);
4995
4996 /*
4997 * Sometimes during startup the pset_map can contain a bit
4998 * for a pset that isn't fully published in pset_array because
4999 * the pset_map read isn't an acquire load.
5000 *
5001 * In order to avoid needing an acquire barrier here, just bail
5002 * out.
5003 */
5004 if (nset == PROCESSOR_SET_NULL) {
5005 return pset;
5006 }
5007 } while (nset->online_processor_count < 1 && nset != pset);
5008
5009 return nset;
5010 }
5011
5012 /*
5013 * choose_processor:
5014 *
5015 * Choose a processor for the thread, beginning at
5016 * the pset. Accepts an optional processor hint in
5017 * the pset.
5018 *
5019 * Returns a processor, possibly from a different pset.
5020 *
5021 * The thread must be locked. The pset must be locked,
5022 * and the resulting pset is locked on return.
5023 */
5024 processor_t
5025 choose_processor(
5026 processor_set_t starting_pset,
5027 processor_t processor,
5028 thread_t thread)
5029 {
5030 processor_set_t pset = starting_pset;
5031 processor_set_t nset;
5032
5033 assert(thread->sched_pri <= MAXPRI);
5034
5035 /*
5036 * Prefer the hinted processor, when appropriate.
5037 */
5038
5039 /* Fold last processor hint from secondary processor to its primary */
5040 if (processor != PROCESSOR_NULL) {
5041 processor = processor->processor_primary;
5042 }
5043
5044 /*
5045 * Only consult platform layer if pset is active, which
5046 * it may not be in some cases when a multi-set system
5047 * is going to sleep.
5048 */
5049 if (pset->online_processor_count) {
5050 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
5051 processor_t mc_processor = machine_choose_processor(pset, processor);
5052 if (mc_processor != PROCESSOR_NULL) {
5053 processor = mc_processor->processor_primary;
5054 }
5055 }
5056 }
5057
5058 /*
5059 * At this point, we may have a processor hint, and we may have
5060 * an initial starting pset. If the hint is not in the pset, or
5061 * if the hint is for a processor in an invalid state, discard
5062 * the hint.
5063 */
5064 if (processor != PROCESSOR_NULL) {
5065 if (processor->processor_set != pset) {
5066 processor = PROCESSOR_NULL;
5067 } else if (!processor->is_recommended) {
5068 processor = PROCESSOR_NULL;
5069 } else {
5070 switch (processor->state) {
5071 case PROCESSOR_START:
5072 case PROCESSOR_SHUTDOWN:
5073 case PROCESSOR_PENDING_OFFLINE:
5074 case PROCESSOR_OFF_LINE:
5075 /*
5076 * Hint is for a processor that cannot support running new threads.
5077 */
5078 processor = PROCESSOR_NULL;
5079 break;
5080 case PROCESSOR_IDLE:
5081 /*
5082 * Hint is for an idle processor. Assume it is no worse than any other
5083 * idle processor. The platform layer had an opportunity to provide
5084 * the "least cost idle" processor above.
5085 */
5086 if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5087 return processor;
5088 }
5089 processor = PROCESSOR_NULL;
5090 break;
5091 case PROCESSOR_RUNNING:
5092 case PROCESSOR_DISPATCHING:
5093 /*
5094 * Hint is for an active CPU. This fast-path allows
5095 * realtime threads to preempt non-realtime threads
5096 * to regain their previous executing processor.
5097 */
5098 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5099 if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5100 return processor;
5101 }
5102 processor = PROCESSOR_NULL;
5103 }
5104
5105 /* Otherwise, use hint as part of search below */
5106 break;
5107 default:
5108 processor = PROCESSOR_NULL;
5109 break;
5110 }
5111 }
5112 }
5113
5114 /*
5115 * Iterate through the processor sets to locate
5116 * an appropriate processor. Seed results with
5117 * a last-processor hint, if available, so that
5118 * a search must find something strictly better
5119 * to replace it.
5120 *
5121 * A primary/secondary pair of SMT processors are
5122 * "unpaired" if the primary is busy but its
5123 * corresponding secondary is idle (so the physical
5124 * core has full use of its resources).
5125 */
5126
5127 integer_t lowest_priority = MAXPRI + 1;
5128 integer_t lowest_secondary_priority = MAXPRI + 1;
5129 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
5130 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
5131 integer_t lowest_count = INT_MAX;
5132 processor_t lp_processor = PROCESSOR_NULL;
5133 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
5134 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
5135 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
5136 processor_t lc_processor = PROCESSOR_NULL;
5137
5138 if (processor != PROCESSOR_NULL) {
5139 /* All other states should be enumerated above. */
5140 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
5141 assert(thread->sched_pri < BASEPRI_RTQUEUES);
5142
5143 lowest_priority = processor->current_pri;
5144 lp_processor = processor;
5145
5146 lowest_count = SCHED(processor_runq_count)(processor);
5147 lc_processor = processor;
5148 }
5149
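/*
 * Realtime path: the search below makes up to two passes over the node's
 * psets, first over primary processors only and then, if needed, also
 * considering SMT secondaries. Each pass first looks for an idle or
 * fast-track CPU via choose_processor_for_realtime_thread(); failing that,
 * it preempts the first CPU found running a further-deadline realtime
 * thread, tracking the pset with the shallowest RT run queue as a fallback.
 * If a candidate was skipped only because an urgent AST was already pending,
 * the whole search is retried once with those CPUs included.
 */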
5150 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5151 pset_node_t node = pset->node;
5152 bool include_ast_urgent_pending_cpus = false;
5153 cpumap_t ast_urgent_pending;
5154 try_again:
5155 ast_urgent_pending = 0;
5156 int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0) || include_ast_urgent_pending_cpus;
5157 for (; consider_secondaries < 2; consider_secondaries++) {
5158 pset = change_locked_pset(pset, starting_pset);
5159 do {
5160 cpumap_t available_map = pset_available_cpumap(pset);
5161 if (available_map == 0) {
5162 goto no_available_cpus;
5163 }
5164
5165 processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries, false);
5166 if (processor) {
5167 return processor;
5168 }
5169
5170 if (consider_secondaries) {
5171 processor = choose_furthest_deadline_processor_for_realtime_thread(pset, thread->sched_pri, thread->realtime.deadline, PROCESSOR_NULL, false, include_ast_urgent_pending_cpus);
5172 if (processor) {
5173 /*
5174 * Instead of looping through all the psets to find the global
5175 * furthest deadline processor, preempt the first candidate found.
5176 * The preempted thread will then find any other available far deadline
5177 * processors to preempt.
5178 */
5179 return processor;
5180 }
5181
5182 ast_urgent_pending |= pset->pending_AST_URGENT_cpu_mask;
5183
5184 if (rt_runq_count(pset) < lowest_count) {
5185 int cpuid = bit_first(available_map);
5186 assert(cpuid >= 0);
5187 lc_processor = processor_array[cpuid];
5188 lowest_count = rt_runq_count(pset);
5189 }
5190 }
5191
5192 no_available_cpus:
5193 nset = next_pset(pset);
5194
5195 if (nset != starting_pset) {
5196 pset = change_locked_pset(pset, nset);
5197 }
5198 } while (nset != starting_pset);
5199 }
5200
5201 /* Short cut for single pset nodes */
5202 if (bit_count(node->pset_map) == 1) {
5203 if (lc_processor) {
5204 pset_assert_locked(lc_processor->processor_set);
5205 return lc_processor;
5206 }
5207 } else {
5208 if (ast_urgent_pending && !include_ast_urgent_pending_cpus) {
5209 /* See the comment in choose_furthest_deadline_processor_for_realtime_thread() */
5210 include_ast_urgent_pending_cpus = true;
5211 goto try_again;
5212 }
5213 }
5214
5215 processor = lc_processor;
5216
5217 if (processor) {
5218 pset = change_locked_pset(pset, processor->processor_set);
5219 /* Check that chosen processor is still usable */
5220 cpumap_t available_map = pset_available_cpumap(pset);
5221 if (bit_test(available_map, processor->cpu_id)) {
5222 return processor;
5223 }
5224
5225 /* processor is no longer usable */
5226 processor = PROCESSOR_NULL;
5227 }
5228
5229 pset_assert_locked(pset);
5230 pset_unlock(pset);
5231 return PROCESSOR_NULL;
5232 }
5233
5234 /* No realtime threads from this point on */
5235 assert(thread->sched_pri < BASEPRI_RTQUEUES);
5236
5237 do {
5238 /*
5239 * Choose an idle processor, in pset traversal order
5240 */
5241
5242 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
5243 pset->primary_map &
5244 pset->recommended_bitmask);
5245
5246 /* there shouldn't be a pending AST if the processor is idle */
5247 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
5248
5249 int cpuid = lsb_first(idle_primary_map);
5250 if (cpuid >= 0) {
5251 processor = processor_array[cpuid];
5252 return processor;
5253 }
5254
5255 /*
5256 * Otherwise, enumerate active and idle processors to find primary candidates
5257 * with lower priority/etc.
5258 */
5259
5260 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
5261 pset->recommended_bitmask &
5262 ~pset->pending_AST_URGENT_cpu_mask);
5263
5264 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
5265 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
5266 }
5267
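/*
 * Rotate the map so that bit 0 corresponds to the CPU just after the last
 * chosen one; lsb_first() then scans in round-robin order starting past
 * pset->last_chosen, and the real cpuid is recovered by adding the rotation
 * back in modulo 64.
 */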
5268 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
5269 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
5270 cpuid = ((rotid + pset->last_chosen + 1) & 63);
5271 processor = processor_array[cpuid];
5272
5273 integer_t cpri = processor->current_pri;
5274 processor_t primary = processor->processor_primary;
5275 if (primary != processor) {
5276 /* If primary is running a NO_SMT thread, don't choose its secondary */
5277 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
5278 if (cpri < lowest_secondary_priority) {
5279 lowest_secondary_priority = cpri;
5280 lp_paired_secondary_processor = processor;
5281 }
5282 }
5283 } else {
5284 if (cpri < lowest_priority) {
5285 lowest_priority = cpri;
5286 lp_processor = processor;
5287 }
5288 }
5289
5290 integer_t ccount = SCHED(processor_runq_count)(processor);
5291 if (ccount < lowest_count) {
5292 lowest_count = ccount;
5293 lc_processor = processor;
5294 }
5295 }
5296
5297 /*
5298 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
5299 * the idle primary would have short-circuited the loop above
5300 */
5301 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
5302 ~pset->primary_map &
5303 pset->recommended_bitmask);
5304
5305 /* there shouldn't be a pending AST if the processor is idle */
5306 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
5307 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
5308
5309 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
5310 processor = processor_array[cpuid];
5311
5312 processor_t cprimary = processor->processor_primary;
5313
5314 integer_t primary_pri = cprimary->current_pri;
5315
5316 /*
5317 * TODO: This should also make the same decisions
5318 * as secondary_can_run_realtime_thread
5319 *
5320 * TODO: Keep track of the pending preemption priority
5321 * of the primary to make this more accurate.
5322 */
5323
5324 /* If the primary is running a no-smt thread, then don't choose its secondary */
5325 if (cprimary->state == PROCESSOR_RUNNING &&
5326 processor_active_thread_no_smt(cprimary)) {
5327 continue;
5328 }
5329
5330 /*
5331 * Find the idle secondary processor with the lowest priority primary
5332 *
5333 * We will choose this processor as a fallback if we find no better
5334 * primary to preempt.
5335 */
5336 if (primary_pri < lowest_idle_secondary_priority) {
5337 lp_idle_secondary_processor = processor;
5338 lowest_idle_secondary_priority = primary_pri;
5339 }
5340
5341 /* Find the lowest priority active primary with idle secondary */
5342 if (primary_pri < lowest_unpaired_primary_priority) {
5343 /* If the primary processor is offline or starting up, it's not a candidate for this path */
5344 if (cprimary->state != PROCESSOR_RUNNING &&
5345 cprimary->state != PROCESSOR_DISPATCHING) {
5346 continue;
5347 }
5348
5349 if (!cprimary->is_recommended) {
5350 continue;
5351 }
5352
5353 /* if the primary is pending preemption, don't try to re-preempt it */
5354 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
5355 continue;
5356 }
5357
5358 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
5359 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
5360 continue;
5361 }
5362
5363 lowest_unpaired_primary_priority = primary_pri;
5364 lp_unpaired_primary_processor = cprimary;
5365 }
5366 }
5367
5368 /*
5369 * We prefer preempting a primary processor over waking up its secondary.
5370 * The secondary will then be woken up by the preempted thread.
5371 */
5372 if (thread->sched_pri > lowest_unpaired_primary_priority) {
5373 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
5374 return lp_unpaired_primary_processor;
5375 }
5376
5377 /*
5378 * We prefer preempting a lower priority active processor over directly
5379 * waking up an idle secondary.
5380 * The preempted thread will then find the idle secondary.
5381 */
5382 if (thread->sched_pri > lowest_priority) {
5383 pset->last_chosen = lp_processor->cpu_id;
5384 return lp_processor;
5385 }
5386
5387 /*
5388 * lc_processor is used to indicate the best processor set run queue
5389 * on which to enqueue a thread when all available CPUs are busy with
5390 * higher priority threads, so try to make sure it is initialized.
5391 */
5392 if (lc_processor == PROCESSOR_NULL) {
5393 cpumap_t available_map = pset_available_cpumap(pset);
5394 cpuid = lsb_first(available_map);
5395 if (cpuid >= 0) {
5396 lc_processor = processor_array[cpuid];
5397 lowest_count = SCHED(processor_runq_count)(lc_processor);
5398 }
5399 }
5400
5401 /*
5402 * Move onto the next processor set.
5403 *
5404 * If all primary processors in this pset are running a higher
5405 * priority thread, move on to next pset. Only when we have
5406 * exhausted the search for primary processors do we
5407 * fall back to secondaries.
5408 */
5409 #if CONFIG_SCHED_EDGE
5410 /*
5411 * The edge scheduler expects a CPU to be selected from the pset it passed in
5412 * as the starting pset for non-RT workloads. The edge migration algorithm
5413 * should already have considered idle CPUs and loads to decide the starting_pset;
5414 * which means that this loop can be short-circuited.
5415 */
5416 nset = starting_pset;
5417 #else /* CONFIG_SCHED_EDGE */
5418 nset = next_pset(pset);
5419 #endif /* CONFIG_SCHED_EDGE */
5420
5421 if (nset != starting_pset) {
5422 pset = change_locked_pset(pset, nset);
5423 }
5424 } while (nset != starting_pset);
5425
5426 /*
5427 * Make sure that we pick a running processor,
5428 * and that the correct processor set is locked.
5429 * Since we may have unlocked the candidate processor's
5430 * pset, it may have changed state.
5431 *
5432 * All primary processors are running a higher priority
5433 * thread, so the only options left are enqueuing on
5434 * the secondary processor that would perturb the lowest-priority
5435 * primary, or the least busy primary.
5436 */
5437
5438 /* lowest_priority is evaluated in the main loops above */
5439 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
5440 processor = lp_idle_secondary_processor;
5441 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
5442 processor = lp_paired_secondary_processor;
5443 } else if (lc_processor != PROCESSOR_NULL) {
5444 processor = lc_processor;
5445 } else {
5446 processor = PROCESSOR_NULL;
5447 }
5448
5449 if (processor) {
5450 pset = change_locked_pset(pset, processor->processor_set);
5451 /* Check that chosen processor is still usable */
5452 cpumap_t available_map = pset_available_cpumap(pset);
5453 if (bit_test(available_map, processor->cpu_id)) {
5454 pset->last_chosen = processor->cpu_id;
5455 return processor;
5456 }
5457
5458 /* processor is no longer usable */
5459 processor = PROCESSOR_NULL;
5460 }
5461
5462 pset_assert_locked(pset);
5463 pset_unlock(pset);
5464 return PROCESSOR_NULL;
5465 }
5466
5467 /*
5468 * Default implementation of SCHED(choose_node)()
5469 * for single node systems
5470 */
5471 pset_node_t
5472 sched_choose_node(__unused thread_t thread)
5473 {
5474 return &pset_node0;
5475 }
5476
5477 /*
5478 * choose_starting_pset:
5479 *
5480 * Choose a starting processor set for the thread.
5481 * May return a processor hint within the pset.
5482 *
5483 * Returns a starting processor set, to be used by
5484 * choose_processor.
5485 *
5486 * The thread must be locked. The resulting pset is unlocked on return,
5487 * and is chosen without taking any pset locks.
5488 */
5489 processor_set_t
5490 choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
5491 {
5492 processor_set_t pset;
5493 processor_t processor = PROCESSOR_NULL;
5494
5495 if (thread->affinity_set != AFFINITY_SET_NULL) {
5496 /*
5497 * Use affinity set policy hint.
5498 */
5499 pset = thread->affinity_set->aset_pset;
5500 } else if (thread->last_processor != PROCESSOR_NULL) {
5501 /*
5502 * Simple (last processor) affinity case.
5503 */
5504 processor = thread->last_processor;
5505 pset = processor->processor_set;
5506 } else {
5507 /*
5508 * No Affinity case:
5509 *
5510 * Utilize a per-task hint to spread threads
5511 * among the available processor sets.
5512 * NRG this seems like the wrong thing to do.
5513 * See also task->pset_hint = pset in thread_setrun()
5514 */
5515 pset = get_threadtask(thread)->pset_hint;
5516 if (pset == PROCESSOR_SET_NULL) {
5517 pset = current_processor()->processor_set;
5518 }
5519
5520 pset = choose_next_pset(pset);
5521 }
5522
5523 if (!bit_test(node->pset_map, pset->pset_id)) {
5524 /* pset is not from this node so choose one that is */
5525 int id = lsb_first(node->pset_map);
5526 if (id < 0) {
5527 /* startup race, so check again under the node lock */
5528 lck_spin_lock(&pset_node_lock);
5529 if (bit_test(node->pset_map, pset->pset_id)) {
5530 id = pset->pset_id;
5531 } else {
5532 id = lsb_first(node->pset_map);
5533 }
5534 lck_spin_unlock(&pset_node_lock);
5535 }
5536 assert(id >= 0);
5537 pset = pset_array[id];
5538 }
5539
5540 if (bit_count(node->pset_map) == 1) {
5541 /* Only a single pset in this node */
5542 goto out;
5543 }
5544
5545 bool avoid_cpu0 = false;
5546
5547 #if defined(__x86_64__)
5548 if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
5549 /* Avoid the pset containing cpu0 */
5550 avoid_cpu0 = true;
5551 /* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */
5552 assert(bit_test(pset_array[0]->cpu_bitmask, 0));
5553 }
5554 #endif
5555
5556 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5557 pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
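/*
 * When avoiding cpu0, rotating the candidate map right by one moves pset 0's
 * bit to the most-significant position, so lsb_first() considers it last
 * rather than first; the chosen pset id is mapped back by adding 1 modulo 64.
 * The same trick is used for the non-primary fallback map below.
 */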
5558 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
5559 if (avoid_cpu0) {
5560 rt_target_map = bit_ror64(rt_target_map, 1);
5561 }
5562 int rotid = lsb_first(rt_target_map);
5563 if (rotid >= 0) {
5564 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5565 pset = pset_array[id];
5566 goto out;
5567 }
5568 }
5569 if (!pset->is_SMT || !sched_allow_rt_smt) {
5570 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5571 goto out;
5572 }
5573 rt_target_map = atomic_load(&node->pset_non_rt_map);
5574 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
5575 if (avoid_cpu0) {
5576 rt_target_map = bit_ror64(rt_target_map, 1);
5577 }
5578 int rotid = lsb_first(rt_target_map);
5579 if (rotid >= 0) {
5580 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5581 pset = pset_array[id];
5582 goto out;
5583 }
5584 }
5585 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5586 } else {
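/*
 * Non-realtime thread: prefer a pset that currently has at least one idle
 * CPU. If the hinted pset has none, redirect to the first pset that does.
 */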
5587 pset_map_t idle_map = atomic_load(&node->pset_idle_map);
5588 if (!bit_test(idle_map, pset->pset_id)) {
5589 int next_idle_pset_id = lsb_first(idle_map);
5590 if (next_idle_pset_id >= 0) {
5591 pset = pset_array[next_idle_pset_id];
5592 }
5593 }
5594 }
5595
5596 out:
5597 if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
5598 processor = PROCESSOR_NULL;
5599 }
5600 if (processor != PROCESSOR_NULL) {
5601 *processor_hint = processor;
5602 }
5603
5604 assert(pset != NULL);
5605 return pset;
5606 }
5607
5608 /*
5609 * thread_setrun:
5610 *
5611 * Dispatch thread for execution, onto an idle
5612 * processor or run queue, and signal a preemption
5613 * as appropriate.
5614 *
5615 * Thread must be locked.
5616 */
5617 void
5618 thread_setrun(
5619 thread_t thread,
5620 sched_options_t options)
5621 {
5622 processor_t processor = PROCESSOR_NULL;
5623 processor_set_t pset;
5624
5625 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
5626 assert(thread->runq == PROCESSOR_NULL);
5627
5628 #if CONFIG_PREADOPT_TG
5629 /* We know that the thread is not in the runq by virtue of being in this
5630 * function and the thread is not self since we are running. We can safely
5631 * resolve the thread group hierarchy and modify the thread's thread group
5632 * here. */
5633 thread_resolve_and_enforce_thread_group_hierarchy_if_needed(thread);
5634 #endif
5635
5636 /*
5637 * Update priority if needed.
5638 */
5639 if (SCHED(can_update_priority)(thread)) {
5640 SCHED(update_priority)(thread);
5641 }
5642 thread->sfi_class = sfi_thread_classify(thread);
5643
5644 if (thread->bound_processor == PROCESSOR_NULL) {
5645 /*
5646 * Unbound case.
5647 *
5648 * Usually, this loop will only be executed once,
5649 * but if CLPC derecommends a processor after it has been chosen,
5650 * or if a processor is shut down after it is chosen,
5651 * choose_processor() may return NULL, so a retry
5652 * may be necessary. A single retry will usually
5653 * be enough, and we can't afford to retry too many times
5654 * because interrupts are disabled.
5655 */
5656 #define CHOOSE_PROCESSOR_MAX_RETRIES 3
5657 for (int retry = 0; retry <= CHOOSE_PROCESSOR_MAX_RETRIES; retry++) {
5658 processor_t processor_hint = PROCESSOR_NULL;
5659 pset_node_t node = SCHED(choose_node)(thread);
5660 processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
5661
5662 pset_lock(starting_pset);
5663
5664 processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
5665 if (processor != PROCESSOR_NULL) {
5666 pset = processor->processor_set;
5667 pset_assert_locked(pset);
5668 break;
5669 }
5670 }
5671 /*
5672 * If choose_processor() still returns NULL,
5673 * which is very unlikely,
5674 * choose the master_processor, which is always
5675 * safe to choose.
5676 */
5677 if (processor == PROCESSOR_NULL) {
5678 /* Choose fallback processor */
5679 processor = master_processor;
5680 pset = processor->processor_set;
5681 pset_lock(pset);
5682 assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5683 }
5684 task_t task = get_threadtask(thread);
5685 if (!(task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE)) {
5686 task->pset_hint = pset; /* NRG this is done without holding the task lock */
5687 }
5688 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
5689 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
5690 assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5691 } else {
5692 /*
5693 * Bound case:
5694 *
5695 * Unconditionally dispatch on the processor.
5696 */
5697 processor = thread->bound_processor;
5698 pset = processor->processor_set;
5699 pset_lock(pset);
5700
5701 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
5702 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
5703 }
5704
5705 /*
5706 * Dispatch the thread on the chosen processor.
5707 * TODO: This should be based on sched_mode, not sched_pri
5708 */
5709 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5710 realtime_setrun(processor, thread);
5711 } else {
5712 processor_setrun(processor, thread, options);
5713 }
5714 /* pset is now unlocked */
5715 if (thread->bound_processor == PROCESSOR_NULL) {
5716 SCHED(check_spill)(pset, thread);
5717 }
5718 }
5719
5720 processor_set_t
5721 task_choose_pset(
5722 task_t task)
5723 {
5724 processor_set_t pset = task->pset_hint;
5725
5726 if (pset != PROCESSOR_SET_NULL) {
5727 pset = choose_next_pset(pset);
5728 }
5729
5730 return pset;
5731 }
5732
5733 /*
5734 * Check for a preemption point in
5735 * the current context.
5736 *
5737 * Called at splsched with thread locked.
5738 */
5739 ast_t
5740 csw_check(
5741 thread_t thread,
5742 processor_t processor,
5743 ast_t check_reason)
5744 {
5745 processor_set_t pset = processor->processor_set;
5746
5747 assert(thread == processor->active_thread);
5748
5749 pset_lock(pset);
5750
5751 processor_state_update_from_thread(processor, thread, true);
5752
5753 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
5754
5755 /* Acknowledge the IPI if we decided not to preempt */
5756
5757 if ((preempt & AST_URGENT) == 0) {
5758 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5759 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 8);
5760 }
5761 }
5762
5763 if ((preempt & AST_PREEMPT) == 0) {
5764 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
5765 }
5766
5767 pset_unlock(pset);
5768
5769 return preempt;
5770 }
5771
5772 /*
5773 * Check for preemption at splsched with
5774 * pset and thread locked
5775 */
5776 ast_t
5777 csw_check_locked(
5778 thread_t thread,
5779 processor_t processor,
5780 processor_set_t pset,
5781 ast_t check_reason)
5782 {
5783 /*
5784 * If the current thread is running on a processor that is no longer recommended,
5785 * urgently preempt it, at which point thread_select() should
5786 * try to idle the processor and re-dispatch the thread to a recommended processor.
5787 */
5788 if (!processor->is_recommended) {
5789 return check_reason | AST_PREEMPT | AST_URGENT;
5790 }
5791
5792 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
5793 return check_reason | AST_PREEMPT | AST_URGENT;
5794 }
5795
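/*
 * A waiting realtime thread preempts urgently if it has higher priority than
 * the running thread, if the running thread is past its first quantum, or if
 * its earliest deadline (even padded by the epsilon) is still earlier than
 * the processor's current deadline; otherwise a non-urgent preemption is
 * requested.
 */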
5796 if (rt_runq_count(pset) > 0) {
5797 if ((rt_runq_priority(pset) > processor->current_pri) || !processor->first_timeslice) {
5798 return check_reason | AST_PREEMPT | AST_URGENT;
5799 } else if (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < processor->deadline) {
5800 return check_reason | AST_PREEMPT | AST_URGENT;
5801 } else {
5802 return check_reason | AST_PREEMPT;
5803 }
5804 }
5805
5806 ast_t result = SCHED(processor_csw_check)(processor);
5807 if (result != AST_NONE) {
5808 return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
5809 }
5810
5811 /*
5812 * Same for avoid-processor
5813 *
5814 * TODO: Should these set AST_REBALANCE?
5815 */
5816 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
5817 return check_reason | AST_PREEMPT;
5818 }
5819
5820 /*
5821 * Even though we could continue executing on this processor, a
5822 * secondary SMT core should try to shed load to another primary core.
5823 *
5824 * TODO: Should this do the same check that thread_select does? i.e.
5825 * if no bound threads target this processor, and idle primaries exist, preempt
5826 * The case of RT threads existing is already taken care of above
5827 */
5828
5829 if (processor->current_pri < BASEPRI_RTQUEUES &&
5830 processor->processor_primary != processor) {
5831 return check_reason | AST_PREEMPT;
5832 }
5833
5834 if (thread->state & TH_SUSP) {
5835 return check_reason | AST_PREEMPT;
5836 }
5837
5838 #if CONFIG_SCHED_SFI
5839 /*
5840 * Current thread may not need to be preempted, but maybe needs
5841 * an SFI wait?
5842 */
5843 result = sfi_thread_needs_ast(thread, NULL);
5844 if (result != AST_NONE) {
5845 return check_reason | result;
5846 }
5847 #endif
5848
5849 return AST_NONE;
5850 }
5851
5852 /*
5853 * Handle preemption IPI or IPI in response to setting an AST flag
5854 * Triggered by cause_ast_check
5855 * Called at splsched
5856 */
5857 void
5858 ast_check(processor_t processor)
5859 {
5860 if (processor->state != PROCESSOR_RUNNING &&
5861 processor->state != PROCESSOR_SHUTDOWN) {
5862 return;
5863 }
5864
5865 thread_t thread = processor->active_thread;
5866
5867 assert(thread == current_thread());
5868
5869 /*
5870 * Pairs with task_restartable_ranges_synchronize
5871 */
5872 thread_lock(thread);
5873
5874 thread_reset_pcs_ack_IPI(thread);
5875
5876 /*
5877 * Propagate thread ast to processor.
5878 * (handles IPI in response to setting AST flag)
5879 */
5880 ast_propagate(thread);
5881
5882 /*
5883 * Stash the old urgency and perfctl values to find out if
5884 * csw_check updates them.
5885 */
5886 thread_urgency_t old_urgency = processor->current_urgency;
5887 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
5888
5889 ast_t preempt;
5890
5891 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
5892 ast_on(preempt);
5893 }
5894
5895 if (old_urgency != processor->current_urgency) {
5896 /*
5897 * Urgency updates happen with the thread lock held (ugh).
5898 * TODO: This doesn't notice QoS changes...
5899 */
5900 uint64_t urgency_param1, urgency_param2;
5901
5902 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
5903 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
5904 }
5905
5906 thread_unlock(thread);
5907
5908 if (old_perfctl_class != processor->current_perfctl_class) {
5909 /*
5910 * We updated the perfctl class of this thread from another core.
5911 * Let CLPC know that the currently running thread has a new
5912 * class.
5913 */
5914
5915 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
5916 mach_approximate_time(), 0, thread);
5917 }
5918 }
5919
5920
5921 /*
5922 * set_sched_pri:
5923 *
5924 * Set the scheduled priority of the specified thread.
5925 *
5926 * This may cause the thread to change queues.
5927 *
5928 * Thread must be locked.
5929 */
5930 void
5931 set_sched_pri(
5932 thread_t thread,
5933 int16_t new_priority,
5934 set_sched_pri_options_t options)
5935 {
5936 bool is_current_thread = (thread == current_thread());
5937 bool removed_from_runq = false;
5938 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
5939
5940 int16_t old_priority = thread->sched_pri;
5941
5942 /* If we're already at this priority, no need to mess with the runqueue */
5943 if (new_priority == old_priority) {
5944 #if CONFIG_SCHED_CLUTCH
5945 /* For the first thread in the system, the priority is correct but
5946 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
5947 * scheduler relies on the bucket being set for all threads, update
5948 * its bucket here.
5949 */
5950 if (thread->th_sched_bucket == TH_BUCKET_RUN) {
5951 assert(thread == vm_pageout_scan_thread);
5952 SCHED(update_thread_bucket)(thread);
5953 }
5954 #endif /* CONFIG_SCHED_CLUTCH */
5955
5956 return;
5957 }
5958
5959 if (is_current_thread) {
5960 assert(thread->state & TH_RUN);
5961 assert(thread->runq == PROCESSOR_NULL);
5962 } else {
5963 removed_from_runq = thread_run_queue_remove(thread);
5964 }
5965
5966 thread->sched_pri = new_priority;
5967
5968 #if CONFIG_SCHED_CLUTCH
5969 /*
5970 * Since for the clutch scheduler, the thread's bucket determines its runq
5971 * in the hierarchy it is important to update the bucket when the thread
5972 * lock is held and the thread has been removed from the runq hierarchy.
5973 */
5974 SCHED(update_thread_bucket)(thread);
5975
5976 #endif /* CONFIG_SCHED_CLUTCH */
5977
5978 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
5979 (uintptr_t)thread_tid(thread),
5980 thread->base_pri,
5981 thread->sched_pri,
5982 thread->sched_usage,
5983 0);
5984
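/*
 * If the thread was pulled from a run queue above, re-enqueue it at its new
 * priority and let the enqueue path signal any needed preemption.
 */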
5985 if (removed_from_runq) {
5986 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
5987 } else if (is_current_thread) {
5988 processor_t processor = thread->last_processor;
5989 assert(processor == current_processor());
5990
5991 thread_urgency_t old_urgency = processor->current_urgency;
5992
5993 /*
5994 * When dropping in priority, check if the thread no longer belongs on core.
5995 * If a thread raises its own priority, don't aggressively rebalance it.
5996 * <rdar://problem/31699165>
5997 *
5998 * csw_check does a processor_state_update_from_thread, but
5999 * we should do our own if we're being lazy.
6000 */
6001 if (!lazy_update && new_priority < old_priority) {
6002 ast_t preempt;
6003
6004 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
6005 ast_on(preempt);
6006 }
6007 } else {
6008 processor_state_update_from_thread(processor, thread, false);
6009 }
6010
6011 /*
6012 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
6013 * class alterations from user space to occur relatively infrequently, hence
6014 * those are lazily handled. QoS classes have distinct priority bands, and QoS
6015 * inheritance is expected to involve priority changes.
6016 */
6017 if (processor->current_urgency != old_urgency) {
6018 uint64_t urgency_param1, urgency_param2;
6019
6020 thread_urgency_t new_urgency = thread_get_urgency(thread,
6021 &urgency_param1, &urgency_param2);
6022
6023 thread_tell_urgency(new_urgency, urgency_param1,
6024 urgency_param2, 0, thread);
6025 }
6026
6027 /* TODO: only call this if current_perfctl_class changed */
6028 uint64_t ctime = mach_approximate_time();
6029 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
6030 } else if (thread->state & TH_RUN) {
6031 processor_t processor = thread->last_processor;
6032
6033 if (!lazy_update &&
6034 processor != PROCESSOR_NULL &&
6035 processor != current_processor() &&
6036 processor->active_thread == thread) {
6037 cause_ast_check(processor);
6038 }
6039 }
6040 }
6041
6042 /*
6043 * thread_run_queue_remove_for_handoff
6044 *
6045 * Pull a thread or its (recursive) push target out of the runqueue
6046 * so that it is ready for thread_run()
6047 *
6048 * Called at splsched
6049 *
6050 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6051 * This may be different than the thread that was passed in.
6052 */
6053 thread_t
6054 thread_run_queue_remove_for_handoff(thread_t thread)
6055 {
6056 thread_t pulled_thread = THREAD_NULL;
6057
6058 thread_lock(thread);
6059
6060 /*
6061 * Check that the thread is not bound to a different processor,
6062 * that the NO_SMT flag is not set on the thread, that the cluster type
6063 * of the processor matches the thread's if the thread is pinned to a
6064 * particular cluster, and that realtime is not involved.
6065 *
6066 * Next, pull it off its run queue. If it doesn't come, it's not eligible.
6067 */
6068 processor_t processor = current_processor();
6069 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6070 && (!thread_no_smt(thread))
6071 && (processor->current_pri < BASEPRI_RTQUEUES)
6072 && (thread->sched_pri < BASEPRI_RTQUEUES)
6073 #if __AMP__
6074 && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6075 processor->processor_set->pset_id == thread->th_bound_cluster_id)
6076 #endif /* __AMP__ */
6077 ) {
6078 if (thread_run_queue_remove(thread)) {
6079 pulled_thread = thread;
6080 }
6081 }
6082
6083 thread_unlock(thread);
6084
6085 return pulled_thread;
6086 }
6087
6088 /*
6089 * thread_prepare_for_handoff
6090 *
6091 * Make the thread ready for handoff.
6092 * If the thread was runnable, pull it off the runq; if it could not be
6093 * pulled, return NULL.
6094 *
6095 * If the thread was woken up from a wait for handoff, make sure it is not
6096 * bound to a different processor.
6097 *
6098 * Called at splsched
6099 *
6100 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6101 * This may be different than the thread that was passed in.
6102 */
6103 thread_t
6104 thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
6105 {
6106 thread_t pulled_thread = THREAD_NULL;
6107
6108 if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
6109 processor_t processor = current_processor();
6110 thread_lock(thread);
6111
6112 /*
6113 * Check that the thread is not bound to a different processor,
6114 * that the NO_SMT flag is not set on the thread, and that the cluster type
6115 * of the processor matches the thread's if the thread is pinned to a
6116 * particular cluster. Call setrun instead if the above conditions
6117 * are not satisfied.
6118 */
6119 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6120 && (!thread_no_smt(thread))
6121 #if __AMP__
6122 && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6123 processor->processor_set->pset_id == thread->th_bound_cluster_id)
6124 #endif /* __AMP__ */
6125 ) {
6126 pulled_thread = thread;
6127 } else {
6128 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
6129 }
6130 thread_unlock(thread);
6131 } else {
6132 pulled_thread = thread_run_queue_remove_for_handoff(thread);
6133 }
6134
6135 return pulled_thread;
6136 }
6137
6138 /*
6139 * thread_run_queue_remove:
6140 *
6141 * Remove a thread from its current run queue and
6142 * return TRUE if successful.
6143 *
6144 * Thread must be locked.
6145 *
6146 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
6147 * run queues because the caller locked the thread. Otherwise
6148 * the thread is on a run queue, but could be chosen for dispatch
6149 * and removed by another processor under a different lock, which
6150 * will set thread->runq to PROCESSOR_NULL.
6151 *
6152 * Hence the thread select path must not rely on anything that could
6153 * be changed under the thread lock after calling this function,
6154 * most importantly thread->sched_pri.
6155 */
6156 boolean_t
6157 thread_run_queue_remove(
6158 thread_t thread)
6159 {
6160 boolean_t removed = FALSE;
6161 processor_t processor = thread->runq;
6162
6163 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
6164 /* Thread isn't runnable */
6165 assert(thread->runq == PROCESSOR_NULL);
6166 return FALSE;
6167 }
6168
6169 if (processor == PROCESSOR_NULL) {
6170 /*
6171 * The thread is either not on the runq,
6172 * or is in the midst of being removed from the runq.
6173 *
6174 * runq is set to NULL under the pset lock, not the thread
6175 * lock, so the thread may still be in the process of being dequeued
6176 * from the runq. It will wait in invoke for the thread lock to be
6177 * dropped.
6178 */
6179
6180 return FALSE;
6181 }
6182
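/*
 * Non-realtime threads are removed through the scheduler-specific run queue
 * below; realtime threads live on the pset's RT run queue and are handled by
 * the pset-locked path that follows.
 */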
6183 if (thread->sched_pri < BASEPRI_RTQUEUES) {
6184 return SCHED(processor_queue_remove)(processor, thread);
6185 }
6186
6187 processor_set_t pset = processor->processor_set;
6188
6189 pset_lock(pset);
6190
6191 if (thread->runq != PROCESSOR_NULL) {
6192 /*
6193 * Thread is on the RT run queue and we have a lock on
6194 * that run queue.
6195 */
6196 rt_runq_remove(SCHED(rt_runq)(pset), thread);
6197 pset_update_rt_stealable_state(pset);
6198
6199 removed = TRUE;
6200 }
6201
6202 pset_unlock(pset);
6203
6204 return removed;
6205 }
6206
6207 /*
6208 * Put the thread back where it goes after a thread_run_queue_remove
6209 *
6210 * Thread must have been removed under the same thread lock hold
6211 *
6212 * thread locked, at splsched
6213 */
6214 void
6215 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
6216 {
6217 assert(thread->runq == PROCESSOR_NULL);
6218 assert(thread->state & (TH_RUN));
6219
6220 thread_setrun(thread, options);
6221 }
6222
6223 void
6224 sys_override_cpu_throttle(boolean_t enable_override)
6225 {
6226 if (enable_override) {
6227 cpu_throttle_enabled = 0;
6228 } else {
6229 cpu_throttle_enabled = 1;
6230 }
6231 }
6232
6233 thread_urgency_t
6234 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
6235 {
6236 uint64_t urgency_param1 = 0, urgency_param2 = 0;
6237 task_t task = get_threadtask_early(thread);
6238
6239 thread_urgency_t urgency;
6240
6241 if (thread == NULL || task == TASK_NULL || (thread->state & TH_IDLE)) {
6242 urgency_param1 = 0;
6243 urgency_param2 = 0;
6244
6245 urgency = THREAD_URGENCY_NONE;
6246 } else if (thread->sched_mode == TH_MODE_REALTIME) {
6247 urgency_param1 = thread->realtime.period;
6248 urgency_param2 = thread->realtime.deadline;
6249
6250 urgency = THREAD_URGENCY_REAL_TIME;
6251 } else if (cpu_throttle_enabled &&
6252 (thread->sched_pri <= MAXPRI_THROTTLE) &&
6253 (thread->base_pri <= MAXPRI_THROTTLE)) {
6254 /*
6255 * Threads that are running at low priority but are not
6256 * tagged with a specific QoS are separated out from
6257 * the "background" urgency. Performance management
6258 * subsystem can decide to either treat these threads
6259 * as normal threads or look at other signals like thermal
6260 * levels for optimal power/perf tradeoffs for a platform.
6261 */
6262 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
6263 boolean_t task_is_suppressed = (proc_get_effective_task_policy(task, TASK_POLICY_SUP_ACTIVE) == 0x1);
6264
6265 /*
6266 * Background urgency applied when thread priority is
6267 * MAXPRI_THROTTLE or lower and thread is not promoted
6268 * and thread has a QoS specified
6269 */
6270 urgency_param1 = thread->sched_pri;
6271 urgency_param2 = thread->base_pri;
6272
6273 if (thread_lacks_qos && !task_is_suppressed) {
6274 urgency = THREAD_URGENCY_LOWPRI;
6275 } else {
6276 urgency = THREAD_URGENCY_BACKGROUND;
6277 }
6278 } else {
6279 /* For otherwise unclassified threads, report throughput QoS parameters */
6280 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
6281 urgency_param2 = proc_get_effective_task_policy(task, TASK_POLICY_THROUGH_QOS);
6282 urgency = THREAD_URGENCY_NORMAL;
6283 }
6284
6285 if (arg1 != NULL) {
6286 *arg1 = urgency_param1;
6287 }
6288 if (arg2 != NULL) {
6289 *arg2 = urgency_param2;
6290 }
6291
6292 return urgency;
6293 }
6294
6295 perfcontrol_class_t
6296 thread_get_perfcontrol_class(thread_t thread)
6297 {
6298 /* Special case handling */
6299 if (thread->state & TH_IDLE) {
6300 return PERFCONTROL_CLASS_IDLE;
6301 }
6302
6303 if (thread->sched_mode == TH_MODE_REALTIME) {
6304 return PERFCONTROL_CLASS_REALTIME;
6305 }
6306
6307 /* perfcontrol_class based on base_pri */
6308 if (thread->base_pri <= MAXPRI_THROTTLE) {
6309 return PERFCONTROL_CLASS_BACKGROUND;
6310 } else if (thread->base_pri <= BASEPRI_UTILITY) {
6311 return PERFCONTROL_CLASS_UTILITY;
6312 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
6313 return PERFCONTROL_CLASS_NONUI;
6314 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
6315 return PERFCONTROL_CLASS_UI;
6316 } else {
6317 if (get_threadtask(thread) == kernel_task) {
6318 /*
6319 * Classify Above UI kernel threads as PERFCONTROL_CLASS_KERNEL.
6320 * All other lower priority kernel threads should be treated
6321 * as regular threads for performance control purposes.
6322 */
6323 return PERFCONTROL_CLASS_KERNEL;
6324 }
6325 return PERFCONTROL_CLASS_ABOVEUI;
6326 }
6327 }
6328
6329 /*
6330 * This is the processor idle loop, which just looks for other threads
6331 * to execute. Processor idle threads invoke this without supplying a
6332 * current thread, in order to idle without an asserted wait state.
6333 *
6334 * Returns the next thread to execute if dispatched directly.
6335 */
6336
6337 #if 0
6338 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
6339 #else
6340 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
6341 #endif
6342
6343 #if (DEVELOPMENT || DEBUG)
6344 int sched_idle_delay_cpuid = -1;
6345 #endif
6346
6347 thread_t
6348 processor_idle(
6349 thread_t thread,
6350 processor_t processor)
6351 {
6352 processor_set_t pset = processor->processor_set;
6353 struct recount_snap snap = { 0 };
6354
6355 (void)splsched();
6356
6357 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6358 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
6359 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
6360
6361 SCHED_STATS_INC(idle_transitions);
6362 assert(processor->running_timers_active == false);
6363
6364 recount_snapshot(&snap);
6365 recount_processor_idle(&processor->pr_recount, &snap);
6366 cpu_quiescent_counter_leave(snap.rsn_time_mach);
6367
6368 while (1) {
6369 /*
6370 * Ensure that updates to my processor and pset state,
6371 * made by the IPI source processor before sending the IPI,
6372 * are visible on this processor now (even though we don't
6373 * take the pset lock yet).
6374 */
6375 atomic_thread_fence(memory_order_acquire);
6376
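/*
 * Stay idle only while the processor remains in PROCESSOR_IDLE with no
 * pending urgent (or deferred) AST and no realtime spill targeted at this
 * CPU; recommended primary processors also leave idle when realtime work is
 * queued, while secondaries and non-recommended CPUs leave only for threads
 * bound to them.
 */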
6377 if (processor->state != PROCESSOR_IDLE) {
6378 break;
6379 }
6380 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
6381 break;
6382 }
6383 #if defined(CONFIG_SCHED_DEFERRED_AST)
6384 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
6385 break;
6386 }
6387 #endif
6388 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
6389 break;
6390 }
6391
6392 if (processor->is_recommended && (processor->processor_primary == processor)) {
6393 if (rt_runq_count(pset)) {
6394 break;
6395 }
6396 } else {
6397 if (SCHED(processor_bound_count)(processor)) {
6398 break;
6399 }
6400 }
6401
6402 IDLE_KERNEL_DEBUG_CONSTANT(
6403 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
6404
6405 machine_track_platform_idle(TRUE);
6406
6407 machine_idle();
6408 /* returns with interrupts enabled */
6409
6410 machine_track_platform_idle(FALSE);
6411
6412 #if (DEVELOPMENT || DEBUG)
6413 if (processor->cpu_id == sched_idle_delay_cpuid) {
6414 delay(500);
6415 }
6416 #endif
6417
6418 (void)splsched();
6419
6420 atomic_thread_fence(memory_order_acquire);
6421
6422 IDLE_KERNEL_DEBUG_CONSTANT(
6423 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
6424
6425 /*
6426 * Check if we should call sched_timeshare_consider_maintenance() here.
6427 * The CPU was woken out of idle due to an interrupt and we should do the
6428 * call only if the processor is still idle. If the processor is non-idle,
6429 * the threads running on the processor would do the call as part of
6430 * context switching.
6431 */
6432 if (processor->state == PROCESSOR_IDLE) {
6433 sched_timeshare_consider_maintenance(mach_absolute_time());
6434 }
6435
6436 if (!SCHED(processor_queue_empty)(processor)) {
6437 /* Secondary SMT processors respond to directed wakeups
6438 * exclusively. Some platforms induce 'spurious' SMT wakeups.
6439 */
6440 if (processor->processor_primary == processor) {
6441 break;
6442 }
6443 }
6444 }
6445
6446 recount_snapshot(&snap);
6447 recount_processor_run(&processor->pr_recount, &snap);
6448 cpu_quiescent_counter_join(snap.rsn_time_mach);
6449
6450 ast_t reason = AST_NONE;
6451
6452 /* We're handling all scheduling AST's */
6453 ast_off(AST_SCHEDULING);
6454
6455 /*
6456 * thread_select will move the processor from dispatching to running,
6457 * or put it in idle if there's nothing to do.
6458 */
6459 thread_t cur_thread = current_thread();
6460
6461 thread_lock(cur_thread);
6462 thread_t new_thread = thread_select(cur_thread, processor, &reason);
6463 thread_unlock(cur_thread);
6464
6465 assert(processor->running_timers_active == false);
6466
6467 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6468 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
6469 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
6470
6471 return new_thread;
6472 }
6473
6474 /*
6475 * Each processor has a dedicated thread which
6476 * executes the idle loop when there is no suitable
6477 * previous context.
6478 *
6479 * This continuation is entered with interrupts disabled.
6480 */
6481 void
6482 idle_thread(__assert_only void* parameter,
6483 __unused wait_result_t result)
6484 {
6485 assert(ml_get_interrupts_enabled() == FALSE);
6486 assert(parameter == NULL);
6487
6488 processor_t processor = current_processor();
6489
6490 /*
6491 * Ensure that anything running in idle context triggers
6492 * preemption-disabled checks.
6493 */
6494 disable_preemption_without_measurements();
6495
6496 /*
6497 * Enable interrupts temporarily to handle any pending interrupts
6498 * or IPIs before deciding to sleep
6499 */
6500 spllo();
6501
6502 thread_t new_thread = processor_idle(THREAD_NULL, processor);
6503 /* returns with interrupts disabled */
6504
6505 enable_preemption();
6506
6507 if (new_thread != THREAD_NULL) {
6508 thread_run(processor->idle_thread,
6509 idle_thread, NULL, new_thread);
6510 /*NOTREACHED*/
6511 }
6512
6513 thread_block(idle_thread);
6514 /*NOTREACHED*/
6515 }
6516
6517 kern_return_t
6518 idle_thread_create(
6519 processor_t processor)
6520 {
6521 kern_return_t result;
6522 thread_t thread;
6523 spl_t s;
6524 char name[MAXTHREADNAMESIZE];
6525
6526 result = kernel_thread_create(idle_thread, NULL, MAXPRI_KERNEL, &thread);
6527 if (result != KERN_SUCCESS) {
6528 return result;
6529 }
6530
6531 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
6532 thread_set_thread_name(thread, name);
6533
6534 s = splsched();
6535 thread_lock(thread);
6536 thread->bound_processor = processor;
6537 processor->idle_thread = thread;
6538 thread->sched_pri = thread->base_pri = IDLEPRI;
6539 thread->state = (TH_RUN | TH_IDLE);
6540 thread->options |= TH_OPT_IDLE_THREAD;
6541 thread->last_made_runnable_time = thread->last_basepri_change_time = mach_absolute_time();
6542 thread_unlock(thread);
6543 splx(s);
6544
6545 thread_deallocate(thread);
6546
6547 return KERN_SUCCESS;
6548 }
6549
6550 static void sched_update_powered_cores_continue(void);
6551
6552 /*
6553 * sched_startup:
6554 *
6555 * Kicks off scheduler services.
6556 *
6557 * Called at splsched.
6558 */
6559 void
6560 sched_startup(void)
6561 {
6562 kern_return_t result;
6563 thread_t thread;
6564
6565 simple_lock_init(&sched_vm_group_list_lock, 0);
6566
6567 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
6568 NULL, MAXPRI_KERNEL, &thread);
6569 if (result != KERN_SUCCESS) {
6570 panic("sched_startup");
6571 }
6572
6573 thread_deallocate(thread);
6574
6575 assert_thread_magic(thread);
6576
6577 /*
6578 * Yield to the sched_init_thread once, to
6579 * initialize our own thread after being switched
6580 * back to.
6581 *
6582 * The current thread is the only other thread
6583 * active at this point.
6584 */
6585 thread_block(THREAD_CONTINUE_NULL);
6586
6587 result = kernel_thread_start_priority((thread_continue_t)sched_update_powered_cores_continue,
6588 NULL, MAXPRI_KERNEL, &thread);
6589 if (result != KERN_SUCCESS) {
6590 panic("sched_startup");
6591 }
6592
6593 thread_deallocate(thread);
6594
6595 assert_thread_magic(thread);
6596 }
6597
6598 #if __arm64__
6599 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
6600 #endif /* __arm64__ */
6601
6602
6603 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
6604
6605 static volatile uint64_t sched_maintenance_deadline;
6606 static uint64_t sched_tick_last_abstime;
6607 static uint64_t sched_tick_delta;
6608 uint64_t sched_tick_max_delta;
6609
6610
6611 /*
6612 * sched_timeshare_maintenance_continue:
6613 *
6614 * Perform periodic bookkeeping functions about ten
6615 * times per second.
6616 */
6617 void
6618 sched_timeshare_maintenance_continue(void)
6619 {
6620 uint64_t sched_tick_ctime, late_time;
6621
6622 struct sched_update_scan_context scan_context = {
6623 .earliest_bg_make_runnable_time = UINT64_MAX,
6624 .earliest_normal_make_runnable_time = UINT64_MAX,
6625 .earliest_rt_make_runnable_time = UINT64_MAX
6626 };
6627
6628 sched_tick_ctime = mach_absolute_time();
6629
6630 if (__improbable(sched_tick_last_abstime == 0)) {
6631 sched_tick_last_abstime = sched_tick_ctime;
6632 late_time = 0;
6633 sched_tick_delta = 1;
6634 } else {
6635 late_time = sched_tick_ctime - sched_tick_last_abstime;
6636 sched_tick_delta = late_time / sched_tick_interval;
6637 /* Ensure a delta of at least 1, since the interval could be slightly
6638 * smaller than the sched_tick_interval due to dispatch
6639 * latencies.
6640 */
6641 sched_tick_delta = MAX(sched_tick_delta, 1);
6642
6643 /* In the event interrupt latencies or platform
6644 * idle events that advanced the timebase resulted
6645 * in periods where no threads were dispatched,
6646 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
6647 * iterations.
6648 */
6649 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
6650
6651 sched_tick_last_abstime = sched_tick_ctime;
6652 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
6653 }
6654
6655 scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
6656 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
6657 sched_tick_delta, late_time, 0, 0, 0);
6658
6659 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
6660 * This could be greater than 1 if there are substantial intervals during
6661 * which all processors are idle, which rarely happens in practice.
6662 */
6663
6664 sched_tick += sched_tick_delta;
6665
6666 update_vm_info();
6667
6668 /*
6669 * Compute various averages.
6670 */
6671 compute_averages(sched_tick_delta);
6672
6673 /*
6674 * Scan the run queues for threads which
6675 * may need to be updated, and find the earliest runnable thread on the runqueue
6676 * to report its latency.
6677 */
6678 SCHED(thread_update_scan)(&scan_context);
6679
6680 SCHED(rt_runq_scan)(&scan_context);
6681
6682 uint64_t ctime = mach_absolute_time();
6683
6684 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
6685 ctime - scan_context.earliest_bg_make_runnable_time : 0;
6686
6687 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
6688 ctime - scan_context.earliest_normal_make_runnable_time : 0;
6689
6690 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
6691 ctime - scan_context.earliest_rt_make_runnable_time : 0;
6692
6693 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
6694
6695 /*
6696 * Check to see if the special sched VM group needs attention.
6697 */
6698 sched_vm_group_maintenance();
6699
6700 #if __arm64__
6701 /* Check to see if the recommended cores failsafe is active */
6702 sched_recommended_cores_maintenance();
6703 #endif /* __arm64__ */
6704
6705
6706 #if DEBUG || DEVELOPMENT
6707 #if __x86_64__
6708 #include <i386/misc_protos.h>
6709 /* Check for long-duration interrupts */
6710 mp_interrupt_watchdog();
6711 #endif /* __x86_64__ */
6712 #endif /* DEBUG || DEVELOPMENT */
6713
6714 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
6715 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
6716 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
6717
6718 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
6719 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
6720 /*NOTREACHED*/
6721 }
6722
6723 static uint64_t sched_maintenance_wakeups;
6724
6725 /*
6726 * Determine if the set of routines formerly driven by a maintenance timer
6727 * must be invoked, based on a deadline comparison. Signals the scheduler
6728 * maintenance thread on deadline expiration. Must be invoked at an interval
6729 * lower than the "sched_tick_interval", currently accomplished by
6730 * invocation via the quantum expiration timer and at context switch time.
6731 * Performance matters: this routine reuses a timestamp approximating the
6732 * current absolute time received from the caller, and should perform
6733 * no more than a comparison against the deadline in the common case.
6734 */
6735 void
6736 sched_timeshare_consider_maintenance(uint64_t ctime)
6737 {
6738 cpu_quiescent_counter_checkin(ctime);
6739
6740 uint64_t deadline = sched_maintenance_deadline;
6741
6742 if (__improbable(ctime >= deadline)) {
6743 if (__improbable(current_thread() == sched_maintenance_thread)) {
6744 return;
6745 }
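/*
 * Full barrier; presumably this keeps the deadline load above from being
 * reordered relative to the compare-and-swap below before the maintenance
 * thread is woken.
 */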
6746 OSMemoryBarrier();
6747
6748 uint64_t ndeadline = ctime + sched_tick_interval;
6749
6750 if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
6751 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
6752 sched_maintenance_wakeups++;
6753 }
6754 }
6755
6756 #if !CONFIG_SCHED_CLUTCH
6757 /*
6758 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
6759 * scheduler, the load is maintained at the thread group and bucket level.
6760 */
6761 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
6762
6763 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
6764 uint64_t new_deadline = 0;
6765 if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
6766 compute_sched_load();
6767 new_deadline = ctime + sched_load_compute_interval_abs;
6768 os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
6769 }
6770 }
6771 #endif /* CONFIG_SCHED_CLUTCH */
6772
6773 #if __arm64__
6774 uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
6775
6776 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
6777 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
6778 if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
6779 machine_perfcontrol_deadline_passed(perf_deadline);
6780 }
6781 }
6782 #endif /* __arm64__ */
6783 }
6784
6785 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
6786
6787 void
6788 sched_init_thread(void)
6789 {
6790 thread_block(THREAD_CONTINUE_NULL);
6791
6792 thread_t thread = current_thread();
6793
6794 thread_set_thread_name(thread, "sched_maintenance_thread");
6795
6796 sched_maintenance_thread = thread;
6797
6798 SCHED(maintenance_continuation)();
6799
6800 /*NOTREACHED*/
6801 }
6802
6803 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
6804
6805 /*
6806 * thread_update_scan / runq_scan:
6807 *
6808 * Scan the run queues to account for timesharing threads
6809 * which need to be updated.
6810 *
6811 * Scanner runs in two passes. Pass one squirrels likely
6812 * threads away in an array, pass two does the update.
6813 *
6814 * This is necessary because the run queue is locked for
6815 * the candidate scan, but the thread is locked for the update.
6816 *
6817 * Array should be sized to make forward progress, without
6818 * disabling preemption for long periods.
6819 */
6820
6821 #define THREAD_UPDATE_SIZE 128
6822
6823 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
6824 static uint32_t thread_update_count = 0;
6825
6826 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
6827 boolean_t
6828 thread_update_add_thread(thread_t thread)
6829 {
6830 if (thread_update_count == THREAD_UPDATE_SIZE) {
6831 return FALSE;
6832 }
6833
6834 thread_update_array[thread_update_count++] = thread;
6835 thread_reference(thread);
6836 return TRUE;
6837 }
6838
6839 void
6840 thread_update_process_threads(void)
6841 {
6842 assert(thread_update_count <= THREAD_UPDATE_SIZE);
6843
6844 for (uint32_t i = 0; i < thread_update_count; i++) {
6845 thread_t thread = thread_update_array[i];
6846 assert_thread_magic(thread);
6847 thread_update_array[i] = THREAD_NULL;
6848
6849 spl_t s = splsched();
6850 thread_lock(thread);
6851 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
6852 SCHED(update_priority)(thread);
6853 }
6854 thread_unlock(thread);
6855 splx(s);
6856
6857 thread_deallocate(thread);
6858 }
6859
6860 thread_update_count = 0;
6861 }
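
/*
 * Illustrative sketch (hypothetical, not kernel code): the same two-pass shape
 * described above in plain C. Pass one copies candidates into a fixed-size
 * array while the container lock is held; pass two drops that lock and visits
 * each entry under its own lock. A full array just means the caller rescans,
 * so the small bound keeps the locked pass short without losing correctness.
 *
 *	#include <stdbool.h>
 *
 *	#define BATCH	128
 *
 *	struct item;
 *	static struct item *batch[BATCH];
 *	static unsigned batch_count;
 *
 *	// Pass one, container lock held: false means "full, rescan later".
 *	static bool
 *	batch_add(struct item *it)
 *	{
 *		if (batch_count == BATCH) {
 *			return false;
 *		}
 *		batch[batch_count++] = it;
 *		return true;
 *	}
 *
 *	// Pass two, container lock dropped: update each item, then reset.
 *	static void
 *	batch_process(void (*update)(struct item *))
 *	{
 *		for (unsigned i = 0; i < batch_count; i++) {
 *			update(batch[i]);
 *			batch[i] = NULL;
 *		}
 *		batch_count = 0;
 *	}
 */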
6862
6863 static boolean_t
6864 runq_scan_thread(
6865 thread_t thread,
6866 sched_update_scan_context_t scan_context)
6867 {
6868 assert_thread_magic(thread);
6869
6870 if (thread->sched_stamp != sched_tick &&
6871 thread->sched_mode == TH_MODE_TIMESHARE) {
6872 if (thread_update_add_thread(thread) == FALSE) {
6873 return TRUE;
6874 }
6875 }
6876
6877 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
6878 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
6879 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
6880 }
6881 } else {
6882 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
6883 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
6884 }
6885 }
6886
6887 return FALSE;
6888 }
6889
6890 /*
6891 * Scan a runq for candidate threads.
6892 *
6893 * Returns TRUE if retry is needed.
6894 */
6895 boolean_t
6896 runq_scan(
6897 run_queue_t runq,
6898 sched_update_scan_context_t scan_context)
6899 {
6900 int count = runq->count;
6901 int queue_index;
6902
6903 assert(count >= 0);
6904
6905 if (count == 0) {
6906 return FALSE;
6907 }
6908
6909 for (queue_index = bitmap_first(runq->bitmap, NRQS);
6910 queue_index >= 0;
6911 queue_index = bitmap_next(runq->bitmap, queue_index)) {
6912 thread_t thread;
6913 circle_queue_t queue = &runq->queues[queue_index];
6914
6915 cqe_foreach_element(thread, queue, runq_links) {
6916 assert(count > 0);
6917 if (runq_scan_thread(thread, scan_context) == TRUE) {
6918 return TRUE;
6919 }
6920 count--;
6921 }
6922 }
6923
6924 return FALSE;
6925 }
6926
6927 #if CONFIG_SCHED_CLUTCH
6928
6929 boolean_t
6930 sched_clutch_timeshare_scan(
6931 queue_t thread_queue,
6932 uint16_t thread_count,
6933 sched_update_scan_context_t scan_context)
6934 {
6935 if (thread_count == 0) {
6936 return FALSE;
6937 }
6938
6939 thread_t thread;
6940 qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
6941 if (runq_scan_thread(thread, scan_context) == TRUE) {
6942 return TRUE;
6943 }
6944 thread_count--;
6945 }
6946
6947 assert(thread_count == 0);
6948 return FALSE;
6949 }
6950
6951
6952 #endif /* CONFIG_SCHED_CLUTCH */
6953
6954 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
6955
6956 bool
6957 thread_is_eager_preempt(thread_t thread)
6958 {
6959 return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
6960 }
6961
6962 void
6963 thread_set_eager_preempt(thread_t thread)
6964 {
6965 spl_t s = splsched();
6966 thread_lock(thread);
6967
6968 assert(!thread_is_eager_preempt(thread));
6969
6970 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
6971
6972 if (thread == current_thread()) {
6973 /* csw_check updates current_is_eagerpreempt on the processor */
6974 ast_t ast = csw_check(thread, current_processor(), AST_NONE);
6975
6976 thread_unlock(thread);
6977
6978 if (ast != AST_NONE) {
6979 thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
6980 }
6981 } else {
6982 processor_t last_processor = thread->last_processor;
6983
6984 if (last_processor != PROCESSOR_NULL &&
6985 last_processor->state == PROCESSOR_RUNNING &&
6986 last_processor->active_thread == thread) {
6987 cause_ast_check(last_processor);
6988 }
6989
6990 thread_unlock(thread);
6991 }
6992
6993 splx(s);
6994 }
6995
6996 void
6997 thread_clear_eager_preempt(thread_t thread)
6998 {
6999 spl_t s = splsched();
7000 thread_lock(thread);
7001
7002 assert(thread_is_eager_preempt(thread));
7003
7004 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
7005
7006 if (thread == current_thread()) {
7007 current_processor()->current_is_eagerpreempt = false;
7008 }
7009
7010 thread_unlock(thread);
7011 splx(s);
7012 }
7013
7014 /*
7015 * Scheduling statistics
7016 */
7017 void
7018 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
7019 {
7020 struct sched_statistics *stats;
7021 boolean_t to_realtime = FALSE;
7022
7023 stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
7024 stats->csw_count++;
7025
7026 if (otherpri >= BASEPRI_REALTIME) {
7027 stats->rt_sched_count++;
7028 to_realtime = TRUE;
7029 }
7030
7031 if ((reasons & AST_PREEMPT) != 0) {
7032 stats->preempt_count++;
7033
7034 if (selfpri >= BASEPRI_REALTIME) {
7035 stats->preempted_rt_count++;
7036 }
7037
7038 if (to_realtime) {
7039 stats->preempted_by_rt_count++;
7040 }
7041 }
7042 }
7043
7044 void
7045 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
7046 {
7047 uint64_t timestamp = mach_absolute_time();
7048
7049 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
7050 stats->last_change_timestamp = timestamp;
7051 }
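
/*
 * Worked example (illustrative numbers): count_sum integrates run-queue depth
 * over time, so the average depth across a window is count_sum / elapsed.
 * If the queue held 3 threads for 4 time units and then 1 thread for 6 units,
 * count_sum grows by 3*4 + 1*6 = 18, and the average depth over those
 * 10 units is 18 / 10 = 1.8 threads.
 */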
7052
7053 /*
7054 * For calls from assembly code
7055 */
7056 #undef thread_wakeup
7057 void
7058 thread_wakeup(
7059 event_t x);
7060
7061 void
7062 thread_wakeup(
7063 event_t x)
7064 {
7065 thread_wakeup_with_result(x, THREAD_AWAKENED);
7066 }
7067
7068 boolean_t
7069 preemption_enabled(void)
7070 {
7071 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
7072 }
7073
7074 static void
7075 sched_timer_deadline_tracking_init(void)
7076 {
7077 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
7078 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
7079 }
7080
7081 static uint64_t latest_requested_powered_cores = ALL_CORES_POWERED;
7082 processor_reason_t latest_requested_reason = REASON_NONE;
7083 static uint64_t current_requested_powered_cores = ALL_CORES_POWERED;
7084 bool perfcontrol_sleep_override = false;
7085
7086 LCK_GRP_DECLARE(cluster_powerdown_grp, "cluster_powerdown");
7087 LCK_MTX_DECLARE(cluster_powerdown_lock, &cluster_powerdown_grp);
7088 int32_t cluster_powerdown_suspend_count = 0;
7089
7090 bool
7091 sched_is_in_sleep(void)
7092 {
7093 os_atomic_thread_fence(acquire);
7094 return perfcontrol_sleep_override;
7095 }
7096
7097 static void
7098 sched_update_powered_cores_continue(void)
7099 {
7100 lck_mtx_lock(&cluster_powerdown_lock);
7101
7102 if (!cluster_powerdown_suspend_count) {
7103 spl_t s = splsched();
7104 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7105
7106 uint64_t latest = latest_requested_powered_cores;
7107 processor_reason_t reason = latest_requested_reason;
7108 uint64_t current = current_requested_powered_cores;
7109 current_requested_powered_cores = latest;
7110 bool in_sleep = perfcontrol_sleep_override;
7111
7112 simple_unlock(&sched_available_cores_lock);
7113 splx(s);
7114
7115 while (latest != current) {
7116 if (!in_sleep) {
7117 assert((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER));
7118 sched_update_powered_cores(latest, reason, SHUTDOWN_TEMPORARY | WAIT_FOR_LAST_START);
7119 }
7120
7121 s = splsched();
7122 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7123
7124 latest = latest_requested_powered_cores;
7125 reason = latest_requested_reason;
7126 current = current_requested_powered_cores;
7127 current_requested_powered_cores = latest;
7128 in_sleep = perfcontrol_sleep_override;
7129
7130 simple_unlock(&sched_available_cores_lock);
7131 splx(s);
7132 }
7133
7134 assert_wait((event_t)sched_update_powered_cores_continue, THREAD_UNINT);
7135
7136 s = splsched();
7137 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7138 if (latest_requested_powered_cores != current_requested_powered_cores) {
7139 clear_wait(current_thread(), THREAD_AWAKENED);
7140 }
7141 simple_unlock(&sched_available_cores_lock);
7142 splx(s);
7143 }
7144
7145 lck_mtx_unlock(&cluster_powerdown_lock);
7146
7147 thread_block((thread_continue_t)sched_update_powered_cores_continue);
7148 /*NOTREACHED*/
7149 }
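
/*
 * Illustrative sketch (hypothetical userspace analogue, not kernel code): the
 * continuation above keeps re-reading the latest request under the lock and
 * applying it with the lock dropped until the applied value catches up. The
 * same shape, with a pthread mutex standing in for the spl/simple lock pair:
 *
 *	#include <pthread.h>
 *	#include <stdint.h>
 *
 *	static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static uint64_t latest_request;		// written by requesters
 *	static uint64_t current_request;	// last value handed to apply()
 *
 *	static void apply(uint64_t value);	// slow work, done unlocked
 *
 *	static void
 *	drain_requests(void)
 *	{
 *		pthread_mutex_lock(&req_lock);
 *		uint64_t latest = latest_request;
 *		uint64_t current = current_request;
 *		current_request = latest;
 *		pthread_mutex_unlock(&req_lock);
 *
 *		while (latest != current) {
 *			apply(latest);
 *			pthread_mutex_lock(&req_lock);
 *			latest = latest_request;	// pick up racing updates
 *			current = current_request;
 *			current_request = latest;
 *			pthread_mutex_unlock(&req_lock);
 *		}
 *	}
 */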
7150
7151 void
7152 sched_perfcontrol_update_powered_cores(uint64_t requested_powered_cores, processor_reason_t reason, __unused uint32_t flags)
7153 {
7154 assert((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER));
7155
7156 #if DEVELOPMENT || DEBUG
7157 if (flags & (ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED)) {
7158 if (flags & ASSERT_POWERDOWN_SUSPENDED) {
7159 assert(cluster_powerdown_suspend_count > 0);
7160 }
7161 if (flags & ASSERT_IN_SLEEP) {
7162 assert(perfcontrol_sleep_override == true);
7163 }
7164 return;
7165 }
7166 #endif
7167
7168 spl_t s = splsched();
7169 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7170
7171 bool should_wakeup = !cluster_powerdown_suspend_count;
7172 if (should_wakeup) {
7173 latest_requested_powered_cores = requested_powered_cores;
7174 latest_requested_reason = reason;
7175 }
7176
7177 simple_unlock(&sched_available_cores_lock);
7178 splx(s);
7179
7180 if (should_wakeup) {
7181 thread_wakeup((event_t)sched_update_powered_cores_continue);
7182 }
7183 }
7184
7185 void
7186 suspend_cluster_powerdown(void)
7187 {
7188 lck_mtx_lock(&cluster_powerdown_lock);
7189
7190 assert(cluster_powerdown_suspend_count >= 0);
7191
7192 bool first_suspend = (cluster_powerdown_suspend_count == 0);
7193 if (first_suspend) {
7194 spl_t s = splsched();
7195 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7196 latest_requested_powered_cores = ALL_CORES_POWERED;
7197 current_requested_powered_cores = ALL_CORES_POWERED;
7198 latest_requested_reason = REASON_SYSTEM;
7199 simple_unlock(&sched_available_cores_lock);
7200 splx(s);
7201 }
7202
7203 cluster_powerdown_suspend_count++;
7204
7205 if (first_suspend) {
7206 kprintf("%s>calling sched_update_powered_cores(ALL_CORES_POWERED, REASON_SYSTEM, LOCK_STATE | WAIT_FOR_START)\n", __FUNCTION__);
7207 sched_update_powered_cores(ALL_CORES_POWERED, REASON_SYSTEM, LOCK_STATE | WAIT_FOR_START);
7208 }
7209
7210 lck_mtx_unlock(&cluster_powerdown_lock);
7211 }
7212
7213 void
7214 resume_cluster_powerdown(void)
7215 {
7216 lck_mtx_lock(&cluster_powerdown_lock);
7217
7218 if (cluster_powerdown_suspend_count <= 0) {
7219 panic("resume_cluster_powerdown() called with cluster_powerdown_suspend_count=%d\n", cluster_powerdown_suspend_count);
7220 }
7221
7222 cluster_powerdown_suspend_count--;
7223
7224 bool last_resume = (cluster_powerdown_suspend_count == 0);
7225
7226 if (last_resume) {
7227 spl_t s = splsched();
7228 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7229 latest_requested_powered_cores = ALL_CORES_POWERED;
7230 current_requested_powered_cores = ALL_CORES_POWERED;
7231 latest_requested_reason = REASON_SYSTEM;
7232 simple_unlock(&sched_available_cores_lock);
7233 splx(s);
7234
7235 kprintf("%s>calling sched_update_powered_cores(ALL_CORES_POWERED, REASON_SYSTEM, UNLOCK_STATE)\n", __FUNCTION__);
7236 sched_update_powered_cores(ALL_CORES_POWERED, REASON_SYSTEM, UNLOCK_STATE);
7237 }
7238
7239 lck_mtx_unlock(&cluster_powerdown_lock);
7240 }
7241
7242 LCK_MTX_DECLARE(user_cluster_powerdown_lock, &cluster_powerdown_grp);
7243 static bool user_suspended_cluster_powerdown = false;
7244
7245 kern_return_t
7246 suspend_cluster_powerdown_from_user(void)
7247 {
7248 kern_return_t ret = KERN_FAILURE;
7249
7250 lck_mtx_lock(&user_cluster_powerdown_lock);
7251
7252 if (!user_suspended_cluster_powerdown) {
7253 suspend_cluster_powerdown();
7254 user_suspended_cluster_powerdown = true;
7255 ret = KERN_SUCCESS;
7256 }
7257
7258 lck_mtx_unlock(&user_cluster_powerdown_lock);
7259
7260 return ret;
7261 }
7262
7263 kern_return_t
7264 resume_cluster_powerdown_from_user(void)
7265 {
7266 kern_return_t ret = KERN_FAILURE;
7267
7268 lck_mtx_lock(&user_cluster_powerdown_lock);
7269
7270 if (user_suspended_cluster_powerdown) {
7271 resume_cluster_powerdown();
7272 user_suspended_cluster_powerdown = false;
7273 ret = KERN_SUCCESS;
7274 }
7275
7276 lck_mtx_unlock(&user_cluster_powerdown_lock);
7277
7278 return ret;
7279 }
7280
7281 int
7282 get_cluster_powerdown_user_suspended(void)
7283 {
7284 lck_mtx_lock(&user_cluster_powerdown_lock);
7285
7286 int ret = (int)user_suspended_cluster_powerdown;
7287
7288 lck_mtx_unlock(&user_cluster_powerdown_lock);
7289
7290 return ret;
7291 }
7292
7293 #if DEVELOPMENT || DEBUG
7294 /* Functions to support the temporary sysctl */
7295 static uint64_t saved_requested_powered_cores = ALL_CORES_POWERED;
7296 void
7297 sched_set_powered_cores(int requested_powered_cores)
7298 {
7299 processor_reason_t reason = bit_test(requested_powered_cores, 31) ? REASON_CLPC_USER : REASON_CLPC_SYSTEM;
7300 uint32_t flags = requested_powered_cores & 0x30000000;
7301
7302 saved_requested_powered_cores = requested_powered_cores;
7303
7304 requested_powered_cores = bits(requested_powered_cores, 28, 0);
7305
7306 sched_perfcontrol_update_powered_cores(requested_powered_cores, reason, flags);
7307 }
7308 int
7309 sched_get_powered_cores(void)
7310 {
7311 return (int)saved_requested_powered_cores;
7312 }
7313 #endif
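
/*
 * Worked example for the sysctl encoding above (DEVELOPMENT || DEBUG only,
 * illustrative): bit 31 selects REASON_CLPC_USER over REASON_CLPC_SYSTEM,
 * bits 29:28 are passed through as flags, and bits(value, 28, 0) keeps the
 * requested core mask. A raw value of 0x8000000F therefore asks for cores 0-3
 * with no flags on behalf of REASON_CLPC_USER, while 0x0000000F requests the
 * same cores for REASON_CLPC_SYSTEM.
 */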
7314
7315 /*
7316 * Ensure that all cores are powered and recommended before sleep
7317 */
7318 void
7319 sched_override_available_cores_for_sleep(void)
7320 {
7321 spl_t s = splsched();
7322 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7323
7324 if (perfcontrol_sleep_override == false) {
7325 perfcontrol_sleep_override = true;
7326 #if __arm__ || __arm64__
7327 sched_update_recommended_cores(ALL_CORES_RECOMMENDED, REASON_SYSTEM, 0);
7328 #endif
7329 }
7330
7331 simple_unlock(&sched_available_cores_lock);
7332 splx(s);
7333
7334 suspend_cluster_powerdown();
7335 }
7336
7337 /*
7338 * Restore the previously recommended cores, but leave all cores powered
7339 * after sleep
7340 */
7341 void
7342 sched_restore_available_cores_after_sleep(void)
7343 {
7344 spl_t s = splsched();
7345 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7346
7347 if (perfcontrol_sleep_override == true) {
7348 perfcontrol_sleep_override = false;
7349 #if __arm__ || __arm64__
7350 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores,
7351 REASON_NONE, 0);
7352 #endif
7353 }
7354
7355 simple_unlock(&sched_available_cores_lock);
7356 splx(s);
7357
7358 resume_cluster_powerdown();
7359 }
7360
7361 #if __arm__ || __arm64__
7362
7363 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
7364 bool perfcontrol_failsafe_active = false;
7365
7366 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
7367 uint64_t perfcontrol_failsafe_activation_time;
7368 uint64_t perfcontrol_failsafe_deactivation_time;
7369
7370 /* data covering who likely caused it and how long they ran */
7371 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
7372 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
7373 int perfcontrol_failsafe_pid;
7374 uint64_t perfcontrol_failsafe_tid;
7375 uint64_t perfcontrol_failsafe_thread_timer_at_start;
7376 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
7377 uint64_t perfcontrol_failsafe_recommended_at_trigger;
7378
7379 /*
7380 * Perf controller calls here to update the recommended core bitmask.
7381 * If the failsafe is active, we don't immediately apply the new value.
7382 * Instead, we store the new request and use it after the failsafe deactivates.
7383 *
7384 * If the failsafe is not active, immediately apply the update.
7385 *
7386 * No scheduler locks are held, no other locks are held that scheduler might depend on,
7387 * interrupts are enabled
7388 *
7389 * currently the prototype is in osfmk/arm/machine_routines.h
7390 */
7391 void
7392 sched_perfcontrol_update_recommended_cores_reason(uint64_t recommended_cores, processor_reason_t reason, uint32_t flags)
7393 {
7394 assert(preemption_enabled());
7395
7396 spl_t s = splsched();
7397 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7398
7399 if (reason == REASON_CLPC_SYSTEM) {
7400 perfcontrol_system_requested_recommended_cores = recommended_cores;
7401 } else {
7402 assert(reason == REASON_CLPC_USER);
7403 perfcontrol_user_requested_recommended_cores = recommended_cores;
7404 }
7405
7406 perfcontrol_requested_recommended_cores = perfcontrol_system_requested_recommended_cores & perfcontrol_user_requested_recommended_cores;
7407 perfcontrol_requested_recommended_core_count = __builtin_popcountll(perfcontrol_requested_recommended_cores);
7408
7409 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
7410 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores, reason, flags);
7411 } else {
7412 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7413 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
7414 perfcontrol_requested_recommended_cores,
7415 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
7416 }
7417
7418 simple_unlock(&sched_available_cores_lock);
7419 splx(s);
7420 }
7421
7422 void
7423 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
7424 {
7425 sched_perfcontrol_update_recommended_cores_reason(recommended_cores, REASON_CLPC_USER, 0);
7426 }
7427
7428 /*
7429 * Consider whether we need to activate the recommended cores failsafe
7430 *
7431 * Called from quantum timer interrupt context of a realtime thread
7432 * No scheduler locks are held, interrupts are disabled
7433 */
7434 void
7435 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
7436 {
7437 /*
7438 * Check if a realtime thread is starving the system
7439 * and bringing up non-recommended cores would help
7440 *
7441 * TODO: Is this the correct check for recommended == possible cores?
7442 * TODO: Validate the checks without the relevant lock are OK.
7443 */
7444
7445 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
7446 /* keep track of how long the responsible thread runs */
7447 uint64_t cur_th_time = recount_current_thread_time_mach();
7448
7449 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7450
7451 if (perfcontrol_failsafe_active == TRUE &&
7452 cur_thread->thread_id == perfcontrol_failsafe_tid) {
7453 perfcontrol_failsafe_thread_timer_last_seen = cur_th_time;
7454 }
7455
7456 simple_unlock(&sched_available_cores_lock);
7457
7458 /* we're already trying to solve the problem, so bail */
7459 return;
7460 }
7461
7462 /* The failsafe won't help if there are no more processors to enable */
7463 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
7464 return;
7465 }
7466
7467 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
7468
7469 /* Use the maintenance thread as our canary in the coal mine */
7470 thread_t m_thread = sched_maintenance_thread;
7471
7472 /* If it doesn't look bad, nothing to see here */
7473 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
7474 return;
7475 }
7476
7477 /* It looks bad, take the lock to be sure */
7478 thread_lock(m_thread);
7479
7480 if (m_thread->runq == PROCESSOR_NULL ||
7481 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
7482 m_thread->last_made_runnable_time >= too_long_ago) {
7483 /*
7484 * Maintenance thread is either on cpu or blocked, and
7485 * therefore wouldn't benefit from more cores
7486 */
7487 thread_unlock(m_thread);
7488 return;
7489 }
7490
7491 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
7492
7493 thread_unlock(m_thread);
7494
7495 /*
7496 * There are cores disabled at perfcontrol's recommendation, but the
7497 * system is so overloaded that the maintenance thread can't run.
7498 * That likely means that perfcontrol can't run either, so it can't fix
7499 * the recommendation. We have to kick in a failsafe to keep from starving.
7500 *
7501 * When the maintenance thread has been starved for too long,
7502 * ignore the recommendation from perfcontrol and light up all the cores.
7503 *
7504 * TODO: Consider weird states like boot, sleep, or debugger
7505 */
7506
7507 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7508
7509 if (perfcontrol_failsafe_active == TRUE) {
7510 simple_unlock(&sched_available_cores_lock);
7511 return;
7512 }
7513
7514 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7515 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
7516 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
7517
7518 perfcontrol_failsafe_active = TRUE;
7519 perfcontrol_failsafe_activation_time = mach_absolute_time();
7520 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
7521 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
7522
7523 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
7524 task_t task = get_threadtask(cur_thread);
7525 perfcontrol_failsafe_pid = task_pid(task);
7526 strlcpy(perfcontrol_failsafe_name, proc_name_address(get_bsdtask_info(task)), sizeof(perfcontrol_failsafe_name));
7527
7528 perfcontrol_failsafe_tid = cur_thread->thread_id;
7529
7530 /* Blame the thread for time it has run recently */
7531 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
7532
7533 uint64_t last_seen = recount_current_thread_time_mach();
7534
7535 /* Compute the start time of the bad behavior in terms of the thread's on core time */
7536 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
7537 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
7538
7539 /* Ignore the previously recommended core configuration */
7540 sched_update_recommended_cores(ALL_CORES_RECOMMENDED, REASON_SYSTEM, 0);
7541
7542 simple_unlock(&sched_available_cores_lock);
7543 }
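
/*
 * Worked example (illustrative): if perfcontrol_failsafe_starvation_threshold
 * corresponds to, say, 100ms of absolute time, the canary check above compares
 * the maintenance thread's last_made_runnable_time against ctime - 100ms. Only
 * when the thread has been runnable-but-not-running since before that point,
 * and fewer cores are recommended than exist, does the failsafe light up all
 * cores and record the on-core thread as the likely culprit.
 */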
7544
7545 /*
7546 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
7547 *
7548 * Runs in the context of the maintenance thread, no locks held
7549 */
7550 static void
7551 sched_recommended_cores_maintenance(void)
7552 {
7553 /* Common case - no failsafe, nothing to be done here */
7554 if (__probable(perfcontrol_failsafe_active == FALSE)) {
7555 return;
7556 }
7557
7558 uint64_t ctime = mach_absolute_time();
7559
7560 boolean_t print_diagnostic = FALSE;
7561 char p_name[FAILSAFE_NAME_LEN] = "";
7562
7563 spl_t s = splsched();
7564 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7565
7566 /* Check again, under the lock, to avoid races */
7567 if (perfcontrol_failsafe_active == FALSE) {
7568 goto out;
7569 }
7570
7571 /*
7572 * Ensure that the other cores get another few ticks to run some threads
7573 * If we don't have this hysteresis, the maintenance thread is the first
7574 * to run, and then it immediately kills the other cores
7575 */
7576 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
7577 goto out;
7578 }
7579
7580 /* Capture some diagnostic state under the lock so we can print it out later */
7581
7582 int pid = perfcontrol_failsafe_pid;
7583 uint64_t tid = perfcontrol_failsafe_tid;
7584
7585 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
7586 perfcontrol_failsafe_thread_timer_at_start;
7587 uint64_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
7588 uint64_t rec_cores_after = perfcontrol_requested_recommended_cores;
7589 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
7590 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
7591
7592 print_diagnostic = TRUE;
7593
7594 /* Deactivate the failsafe and reinstate the requested recommendation settings */
7595
7596 perfcontrol_failsafe_deactivation_time = ctime;
7597 perfcontrol_failsafe_active = FALSE;
7598
7599 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7600 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
7601 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
7602
7603 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores,
7604 REASON_NONE, 0);
7605
7606 out:
7607 simple_unlock(&sched_available_cores_lock);
7608 splx(s);
7609
7610 if (print_diagnostic) {
7611 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
7612
7613 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
7614 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
7615
7616 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
7617 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
7618
7619 printf("recommended core failsafe kicked in for %lld ms "
7620 "likely due to %s[%d] thread 0x%llx spending "
7621 "%lld ms on cpu at realtime priority - "
7622 "new recommendation: 0x%llx -> 0x%llx\n",
7623 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
7624 rec_cores_before, rec_cores_after);
7625 }
7626 }
7627
7628 #endif /* __arm__ || __arm64__ */
7629
7630 kern_return_t
7631 sched_processor_enable(processor_t processor, boolean_t enable)
7632 {
7633 assert(preemption_enabled());
7634
7635 if (processor == master_processor) {
7636 /* The system can hang if this is allowed */
7637 return KERN_NOT_SUPPORTED;
7638 }
7639
7640 spl_t s = splsched();
7641 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7642
7643 if (enable) {
7644 bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
7645 } else {
7646 bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
7647 }
7648
7649 #if __arm64__
7650 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
7651 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores,
7652 REASON_USER, 0);
7653 } else {
7654 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7655 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
7656 perfcontrol_requested_recommended_cores,
7657 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
7658 }
7659 #else /* __arm64__ */
7660 sched_update_recommended_cores(usercontrol_requested_recommended_cores, REASON_USER, 0);
7661 #endif /* ! __arm64__ */
7662
7663 simple_unlock(&sched_available_cores_lock);
7664 splx(s);
7665
7666 return KERN_SUCCESS;
7667 }
7668
7669 void
7670 sched_mark_processor_online_locked(processor_t processor, __assert_only processor_reason_t reason)
7671 {
7672 assert((processor != master_processor) || (reason == REASON_SYSTEM));
7673
7674 bit_set(sched_online_processors, processor->cpu_id);
7675 }
7676
7677 kern_return_t
7678 sched_mark_processor_offline(processor_t processor, processor_reason_t reason)
7679 {
7680 assert((processor != master_processor) || (reason == REASON_SYSTEM));
7681 kern_return_t ret = KERN_SUCCESS;
7682
7683 spl_t s = splsched();
7684 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7685
7686 if (reason == REASON_SYSTEM) {
7687 bit_clear(sched_online_processors, processor->cpu_id);
7688 simple_unlock(&sched_available_cores_lock);
7689 splx(s);
7690 return ret;
7691 }
7692
7693 uint64_t available_cores = sched_online_processors & perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores;
7694
7695 if (!bit_test(sched_online_processors, processor->cpu_id)) {
7696 /* Processor is already offline */
7697 ret = KERN_NOT_IN_SET;
7698 } else if (available_cores == BIT(processor->cpu_id)) {
7699 ret = KERN_RESOURCE_SHORTAGE;
7700 } else {
7701 bit_clear(sched_online_processors, processor->cpu_id);
7702 ret = KERN_SUCCESS;
7703 }
7704
7705 simple_unlock(&sched_available_cores_lock);
7706 splx(s);
7707
7708 return ret;
7709 }
7710
7711 /*
7712 * Apply a new recommended cores mask to the processors it affects
7713 * Runs after considering failsafes and such
7714 *
7715 * Iterate over processors and update their ->is_recommended field.
7716 * If a processor is running, we let it drain out at its next
7717 * quantum expiration or blocking point. If a processor is idle, there
7718 * may be more work for it to do, so IPI it.
7719 *
7720 * interrupts disabled, sched_available_cores_lock is held
7721 */
7722 static void
7723 sched_update_recommended_cores(uint64_t recommended_cores, processor_reason_t reason, __unused uint32_t flags)
7724 {
7725 uint64_t needs_exit_idle_mask = 0x0;
7726
7727 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
7728 recommended_cores,
7729 #if __arm64__
7730 perfcontrol_failsafe_active, 0, 0);
7731 #else /* __arm64__ */
7732 0, 0, 0);
7733 #endif /* ! __arm64__ */
7734
7735 if (__builtin_popcountll(recommended_cores & sched_online_processors) == 0) {
7736 bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
7737 }
7738
7739 /* First set recommended cores */
7740 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
7741 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
7742 processor_set_t pset = pset_array[pset_id];
7743
7744 cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
7745 cpumap_t newly_recommended = changed_recommendations & recommended_cores;
7746
7747 if (newly_recommended == 0) {
7748 /* Nothing to do */
7749 continue;
7750 }
7751
7752 pset_lock(pset);
7753
7754 for (int cpu_id = lsb_first(newly_recommended); cpu_id >= 0; cpu_id = lsb_next(newly_recommended, cpu_id)) {
7755 processor_t processor = processor_array[cpu_id];
7756 processor->is_recommended = TRUE;
7757 processor->last_recommend_reason = reason;
7758 bit_set(pset->recommended_bitmask, processor->cpu_id);
7759
7760 if (processor->state == PROCESSOR_IDLE) {
7761 if (processor != current_processor()) {
7762 bit_set(needs_exit_idle_mask, processor->cpu_id);
7763 }
7764 }
7765 if ((processor->state != PROCESSOR_OFF_LINE) && (processor->state != PROCESSOR_PENDING_OFFLINE)) {
7766 os_atomic_inc(&processor_avail_count_user, relaxed);
7767 if (processor->processor_primary == processor) {
7768 os_atomic_inc(&primary_processor_avail_count_user, relaxed);
7769 }
7770 SCHED(pset_made_schedulable)(processor, pset, false);
7771 }
7772 }
7773 pset_update_rt_stealable_state(pset);
7774
7775 pset_unlock(pset);
7776 }
7777 }
7778
7779 /* Now shutdown not recommended cores */
7780 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
7781 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
7782 processor_set_t pset = pset_array[pset_id];
7783
7784 cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
7785 cpumap_t newly_unrecommended = changed_recommendations & ~recommended_cores;
7786
7787 if (newly_unrecommended == 0) {
7788 /* Nothing to do */
7789 continue;
7790 }
7791
7792 pset_lock(pset);
7793
7794 for (int cpu_id = lsb_first(newly_unrecommended); cpu_id >= 0; cpu_id = lsb_next(newly_unrecommended, cpu_id)) {
7795 processor_t processor = processor_array[cpu_id];
7796 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
7797
7798 processor->is_recommended = FALSE;
7799 if (reason != REASON_NONE) {
7800 processor->last_derecommend_reason = reason;
7801 }
7802 bit_clear(pset->recommended_bitmask, processor->cpu_id);
7803 if ((processor->state != PROCESSOR_OFF_LINE) && (processor->state != PROCESSOR_PENDING_OFFLINE)) {
7804 os_atomic_dec(&processor_avail_count_user, relaxed);
7805 if (processor->processor_primary == processor) {
7806 os_atomic_dec(&primary_processor_avail_count_user, relaxed);
7807 }
7808 }
7809 pset_update_rt_stealable_state(pset);
7810
7811 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
7812 ipi_type = SCHED_IPI_IMMEDIATE;
7813 }
7814 SCHED(processor_queue_shutdown)(processor);
7815 /* pset unlocked */
7816
7817 SCHED(rt_queue_shutdown)(processor);
7818
7819 if (ipi_type != SCHED_IPI_NONE) {
7820 if (processor == current_processor()) {
7821 ast_on(AST_PREEMPT);
7822 } else {
7823 sched_ipi_perform(processor, ipi_type);
7824 }
7825 }
7826
7827 pset_lock(pset);
7828 }
7829 pset_unlock(pset);
7830 }
7831 }
7832
7833 #if defined(__x86_64__)
7834 commpage_update_active_cpus();
7835 #endif
7836 /* Issue all pending IPIs now that the pset lock has been dropped */
7837 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
7838 processor_t processor = processor_array[cpuid];
7839 machine_signal_idle(processor);
7840 }
7841
7842 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
7843 needs_exit_idle_mask, 0, 0, 0);
7844 }
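
/*
 * Worked example for the delta computation used by both passes above
 * (illustrative numbers). Suppose a pset owns cpus 0-3 (cpu_bitmask = 0b1111),
 * its current recommended_bitmask is 0b0011, and the new recommended_cores
 * restricted to this pset is 0b0110. Then:
 *
 *	changed             = 0b0110 ^ 0b0011   = 0b0101
 *	newly_recommended   = changed &  0b0110 = 0b0100	(cpu 2 comes up)
 *	newly_unrecommended = changed & ~0b0110 = 0b0001	(cpu 0 drains out)
 *
 * cpu 1 stays recommended and cpu 3 stays derecommended, so neither is touched.
 */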
7845
7846 static void
7847 sched_update_powered_cores(uint64_t requested_powered_cores, processor_reason_t reason, uint32_t flags)
7848 {
7849 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_START,
7850 requested_powered_cores, reason, flags, 0);
7851
7852 assert((flags & (LOCK_STATE | UNLOCK_STATE)) ? (reason == REASON_SYSTEM) && (requested_powered_cores == ALL_CORES_POWERED) : 1);
7853
7854 /*
7855 * Loop through newly set requested_powered_cores and start them.
7856 * Loop through newly cleared requested_powered_cores and shut them down.
7857 */
7858
7859 if ((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER)) {
7860 flags |= SHUTDOWN_TEMPORARY;
7861 }
7862
7863 /* First set powered cores */
7864 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
7865 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
7866 processor_set_t pset = pset_array[pset_id];
7867
7868 spl_t s = splsched();
7869 pset_lock(pset);
7870 cpumap_t pset_requested_powered_cores = requested_powered_cores & pset->cpu_bitmask;
7871 cpumap_t powered_cores = (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]);
7872 cpumap_t requested_changes = pset_requested_powered_cores ^ powered_cores;
7873 pset_unlock(pset);
7874 splx(s);
7875
7876 cpumap_t newly_powered = requested_changes & requested_powered_cores;
7877
7878 cpumap_t cpu_map = newly_powered;
7879
7880 if (flags & (LOCK_STATE | UNLOCK_STATE)) {
7881 /*
7882 * We need to change the lock state even if
7883 * we don't need to change the actual state.
7884 */
7885 cpu_map = pset_requested_powered_cores;
7886 /* But not the master_processor, which is always implicitly locked */
7887 bit_clear(cpu_map, master_processor->cpu_id);
7888 }
7889
7890 if (cpu_map == 0) {
7891 /* Nothing to do */
7892 continue;
7893 }
7894
7895 int last_start_cpu_id = bit_first(cpu_map);
7896
7897 for (int cpu_id = lsb_first(cpu_map); cpu_id >= 0; cpu_id = lsb_next(cpu_map, cpu_id)) {
7898 processor_t processor = processor_array[cpu_id];
7899
7900 if ((flags & WAIT_FOR_LAST_START) && (cpu_id == last_start_cpu_id)) {
7901 processor_start_reason(processor, reason, flags | WAIT_FOR_START);
7902 } else {
7903 processor_start_reason(processor, reason, flags);
7904 }
7905 }
7906 }
7907 }
7908
7909 /* Now shutdown not powered cores */
7910 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
7911 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
7912 processor_set_t pset = pset_array[pset_id];
7913
7914 spl_t s = splsched();
7915 pset_lock(pset);
7916 cpumap_t powered_cores = (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]);
7917 cpumap_t requested_changes = (requested_powered_cores & pset->cpu_bitmask) ^ powered_cores;
7918 pset_unlock(pset);
7919 splx(s);
7920
7921 cpumap_t newly_unpowered = requested_changes & ~requested_powered_cores;
7922
7923 if (newly_unpowered == 0) {
7924 /* Nothing to do */
7925 continue;
7926 }
7927
7928 for (int cpu_id = lsb_first(newly_unpowered); cpu_id >= 0; cpu_id = lsb_next(newly_unpowered, cpu_id)) {
7929 processor_t processor = processor_array[cpu_id];
7930
7931 processor_exit_reason(processor, reason, flags);
7932 }
7933 }
7934 }
7935
7936 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_END, 0, 0, 0, 0);
7937 }
7938
7939 void
7940 thread_set_options(uint32_t thopt)
7941 {
7942 spl_t x;
7943 thread_t t = current_thread();
7944
7945 x = splsched();
7946 thread_lock(t);
7947
7948 t->options |= thopt;
7949
7950 thread_unlock(t);
7951 splx(x);
7952 }
7953
7954 void
7955 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
7956 {
7957 thread->pending_block_hint = block_hint;
7958 }
7959
7960 uint32_t
7961 qos_max_parallelism(int qos, uint64_t options)
7962 {
7963 return SCHED(qos_max_parallelism)(qos, options);
7964 }
7965
7966 uint32_t
7967 sched_qos_max_parallelism(__unused int qos, uint64_t options)
7968 {
7969 host_basic_info_data_t hinfo;
7970 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
7971
7972
7973 /*
7974 * The QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE should be used on AMP platforms only which
7975 * implement their own qos_max_parallelism() interfaces.
7976 */
7977 assert((options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) == 0);
7978
7979 /* Query the machine layer for core information */
7980 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
7981 (host_info_t)&hinfo, &count);
7982 assert(kret == KERN_SUCCESS);
7983
7984 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
7985 return hinfo.logical_cpu;
7986 } else {
7987 return hinfo.physical_cpu;
7988 }
7989 }
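
/*
 * Illustrative sketch (hypothetical, userspace): the same HOST_BASIC_INFO
 * query can be issued by a process to see the logical/physical split this
 * routine reports, e.g.:
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		host_basic_info_data_t info;
 *		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *
 *		if (host_info(mach_host_self(), HOST_BASIC_INFO,
 *		    (host_info_t)&info, &count) == KERN_SUCCESS) {
 *			printf("physical %d logical %d\n",
 *			    info.physical_cpu, info.logical_cpu);
 *		}
 *		return 0;
 *	}
 */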
7990
7991 int sched_allow_NO_SMT_threads = 1;
7992 bool
7993 thread_no_smt(thread_t thread)
7994 {
7995 return sched_allow_NO_SMT_threads &&
7996 (thread->bound_processor == PROCESSOR_NULL) &&
7997 ((thread->sched_flags & TH_SFLAG_NO_SMT) || (get_threadtask(thread)->t_flags & TF_NO_SMT));
7998 }
7999
8000 bool
8001 processor_active_thread_no_smt(processor_t processor)
8002 {
8003 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
8004 }
8005
8006 #if __arm64__
8007
8008 /*
8009 * Set up or replace old timer with new timer
8010 *
8011 * Returns true if it canceled an old timer, false if it did not
8012 */
8013 boolean_t
8014 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
8015 {
8016 /*
8017 * Exchange the old deadline for the new deadline; if the old deadline was
8018 * nonzero, the pending callback was cancelled, otherwise it was not.
8019 */
8020
8021 return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
8022 relaxed) != 0;
8023 }
8024
8025 /*
8026 * Set global SFI window (in usec)
8027 */
8028 kern_return_t
8029 sched_perfcontrol_sfi_set_window(uint64_t window_usecs)
8030 {
8031 kern_return_t ret = KERN_NOT_SUPPORTED;
8032 #if CONFIG_THREAD_GROUPS
8033 if (window_usecs == 0ULL) {
8034 ret = sfi_window_cancel();
8035 } else {
8036 ret = sfi_set_window(window_usecs);
8037 }
8038 #endif // CONFIG_THREAD_GROUPS
8039 return ret;
8040 }
8041
8042 /*
8043 * Set background and maintenance SFI class offtimes
8044 */
8045 kern_return_t
8046 sched_perfcontrol_sfi_set_bg_offtime(uint64_t offtime_usecs)
8047 {
8048 kern_return_t ret = KERN_NOT_SUPPORTED;
8049 #if CONFIG_THREAD_GROUPS
8050 if (offtime_usecs == 0ULL) {
8051 ret = sfi_class_offtime_cancel(SFI_CLASS_MAINTENANCE);
8052 ret |= sfi_class_offtime_cancel(SFI_CLASS_DARWIN_BG);
8053 } else {
8054 ret = sfi_set_class_offtime(SFI_CLASS_MAINTENANCE, offtime_usecs);
8055 ret |= sfi_set_class_offtime(SFI_CLASS_DARWIN_BG, offtime_usecs);
8056 }
8057 #endif // CONFIG_THREAD_GROUPS
8058 return ret;
8059 }
8060
8061 /*
8062 * Set utility SFI class offtime
8063 */
8064 kern_return_t
8065 sched_perfcontrol_sfi_set_utility_offtime(uint64_t offtime_usecs)
8066 {
8067 kern_return_t ret = KERN_NOT_SUPPORTED;
8068 #if CONFIG_THREAD_GROUPS
8069 if (offtime_usecs == 0ULL) {
8070 ret = sfi_class_offtime_cancel(SFI_CLASS_UTILITY);
8071 } else {
8072 ret = sfi_set_class_offtime(SFI_CLASS_UTILITY, offtime_usecs);
8073 }
8074 #endif // CONFIG_THREAD_GROUPS
8075 return ret;
8076 }
8077
8078 #endif /* __arm64__ */
8079
8080 #if CONFIG_SCHED_EDGE
8081
8082 #define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
8083
8084 /*
8085 * sched_edge_pset_running_higher_bucket()
8086 *
8087 * Routine to calculate cumulative running counts for each scheduling
8088 * bucket. This effectively lets the load calculation determine whether a
8089 * cluster is running any threads at a QoS lower than the thread being
8090 * migrated.
8091 */
8092
8093 static void
8094 sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
8095 {
8096 bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
8097
8098 /* Edge Scheduler Optimization */
8099 for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
8100 sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
8101 for (sched_bucket_t bucket = cpu_bucket; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
8102 running_higher[bucket]++;
8103 }
8104 }
8105 }
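
/*
 * Worked example (illustrative, assuming the usual bucket ordering where a
 * lower index means higher importance): if three CPUs are running threads at
 * TH_BUCKET_SHARE_FG, TH_BUCKET_SHARE_FG and TH_BUCKET_SHARE_UT, the loop
 * above yields running_higher[TH_BUCKET_SHARE_FG] = 2,
 * running_higher[TH_BUCKET_SHARE_UT] = 3, and 2 for the buckets in between,
 * so running_higher[B] reads as "CPUs currently running work at B's
 * importance or higher".
 */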
8106
8107 /*
8108 * sched_update_pset_load_average()
8109 *
8110 * Updates the load average for each sched bucket for a cluster.
8111 * This routine must be called with the pset lock held.
8112 */
8113 void
8114 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
8115 {
8116 int avail_cpu_count = pset_available_cpu_count(pset);
8117 if (avail_cpu_count == 0) {
8118 /* Looks like the pset is not runnable any more; nothing to do here */
8119 return;
8120 }
8121
8122 /*
8123 * Edge Scheduler Optimization
8124 *
8125 * See if more callers of this routine can pass in timestamps to avoid the
8126 * mach_absolute_time() call here.
8127 */
8128
8129 if (!curtime) {
8130 curtime = mach_absolute_time();
8131 }
8132 uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
8133 int64_t delta_ticks = curtime - last_update;
8134 if (delta_ticks < 0) {
8135 return;
8136 }
8137
8138 uint64_t delta_nsecs = 0;
8139 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
8140
8141 if (__improbable(delta_nsecs > UINT32_MAX)) {
8142 delta_nsecs = UINT32_MAX;
8143 }
8144
8145 #if CONFIG_SCHED_EDGE
8146 /* Update the shared resource load on the pset */
8147 for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
8148 uint64_t shared_rsrc_runnable_load = sched_edge_shared_rsrc_runnable_load(&pset->pset_clutch_root, shared_rsrc_type);
8149 uint64_t shared_rsrc_running_load = bit_count(pset->cpu_running_cluster_shared_rsrc_thread[shared_rsrc_type]);
8150 uint64_t new_shared_load = shared_rsrc_runnable_load + shared_rsrc_running_load;
8151 uint64_t old_shared_load = os_atomic_xchg(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], new_shared_load, relaxed);
8152 if (old_shared_load != new_shared_load) {
8153 KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_SHARED_LOAD) | DBG_FUNC_NONE, pset->pset_cluster_id, shared_rsrc_type, new_shared_load, shared_rsrc_running_load);
8154 }
8155 }
8156 #endif /* CONFIG_SCHED_EDGE */
8157
8158 uint32_t running_higher[TH_BUCKET_SCHED_MAX] = {0};
8159 sched_edge_pset_running_higher_bucket(pset, running_higher);
8160
8161 for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
8162 uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
8163 uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
8164 uint32_t current_runq_depth = (sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket]) / avail_cpu_count;
8165
8166 /*
8167 * For the new load average, multiply current_runq_depth by delta_nsecs (which results in a 32.0 value).
8168 * Since we want to maintain the load average as a 24.8 fixed-point arithmetic value for precision, the
8169 * new load average needs to be shifted before it can be added to the old load average.
8170 */
8171 uint64_t new_load_average_factor = (current_runq_depth * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
8172
8173 /*
8174 * For extremely parallel workloads, it is important that the load average on a cluster moves zero to non-zero
8175 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
8176 * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
8177 */
8178 int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
8179 boolean_t load_uptick = (old_load_shifted == 0) && (current_runq_depth != 0);
8180 boolean_t load_downtick = (old_load_shifted != 0) && (current_runq_depth == 0);
8181 uint64_t load_average;
8182 if (load_uptick || load_downtick) {
8183 load_average = (current_runq_depth << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
8184 } else {
8185 /* Indicates a loaded system; use EWMA for load average calculation */
8186 load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
8187 }
8188 os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
8189 if (load_average != old_load_average) {
8190 KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
8191 }
8192 }
8193 os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
8194 }
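
/*
 * Worked example for the EWMA above (illustrative numbers). With the time
 * constant TC = SCHED_PSET_LOAD_EWMA_TC_NSECS = 10ms, an old load_average of
 * 2.0 (512 in 24.8 fixed point) and a sample of current_runq_depth = 4 taken
 * after delta_nsecs = 5ms:
 *
 *	old_factor = 512 * 10,000,000           = 5,120,000,000
 *	new_factor = (4 * 5,000,000) << 8       = 5,120,000,000
 *	load_avg   = (old + new) / (5ms + 10ms) = 10,240,000,000 / 15,000,000
 *	           ~ 682, i.e. 682 / 256 ~ 2.67 threads
 *
 * The average moves one third of the way from 2.0 toward 4.0 because the
 * sample spans one third of the combined (delta + TC) window.
 */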
8195
8196 void
8197 sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
8198 {
8199 pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
8200 uint64_t avg_thread_execution_time = 0;
8201
8202 os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
8203 old_execution_time_packed.pset_execution_time_packed,
8204 new_execution_time_packed.pset_execution_time_packed, relaxed, {
8205 uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
8206 int64_t delta_ticks = curtime - last_update;
8207 if (delta_ticks < 0) {
8208 /*
8209 * It's possible that another CPU came in and updated the pset_execution_time
8210 * before this CPU could do it. Since the average execution time is meant to
8211 * be an approximate measure per cluster, ignore the older update.
8212 */
8213 os_atomic_rmw_loop_give_up(return );
8214 }
8215 uint64_t delta_nsecs = 0;
8216 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
8217
8218 uint64_t nanotime = 0;
8219 absolutetime_to_nanoseconds(execution_time, &nanotime);
8220 uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
8221
8222 uint64_t old_execution_time = (old_execution_time_packed.pset_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
8223 uint64_t new_execution_time = (execution_time_us * delta_nsecs);
8224
8225 avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
8226 new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
8227 new_execution_time_packed.pset_execution_time_last_update = curtime;
8228 });
8229 if (new_execution_time_packed.pset_avg_thread_execution_time != old_execution_time_packed.pset_avg_thread_execution_time) {
8230 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
8231 }
8232 }
8233
8234 uint64_t
8235 sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
8236 {
8237 return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
8238 }
8239
8240 #else /* CONFIG_SCHED_EDGE */
8241
8242 void
8243 sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
8244 {
8245 int non_rt_load = pset->pset_runq.count;
8246 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
8247 int new_load_average = ((int)pset->load_average + load) >> 1;
8248
8249 pset->load_average = new_load_average;
8250 #if (DEVELOPMENT || DEBUG)
8251 #if __AMP__
8252 if (pset->pset_cluster_type == PSET_AMP_P) {
8253 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
8254 }
8255 #endif
8256 #endif
8257 }
8258
8259 void
8260 sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
8261 {
8262 }
8263
8264 #endif /* CONFIG_SCHED_EDGE */
8265
8266 /* pset is locked */
8267 static bool
8268 processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
8269 {
8270 int cpuid = processor->cpu_id;
8271 #if defined(__x86_64__)
8272 if (sched_avoid_cpu0 && (cpuid == 0)) {
8273 return false;
8274 }
8275 #endif
8276
8277 cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
8278
8279 return bit_test(fasttrack_map, cpuid);
8280 }
8281
8282 /* pset is locked */
8283 static processor_t
8284 choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills)
8285 {
8286 #if defined(__x86_64__)
8287 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
8288 #else
8289 const bool avoid_cpu0 = false;
8290 #endif
8291 cpumap_t cpu_map;
8292
8293 try_again:
8294 cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
8295 if (skip_processor) {
8296 bit_clear(cpu_map, skip_processor->cpu_id);
8297 }
8298 if (skip_spills) {
8299 cpu_map &= ~pset->rt_pending_spill_cpu_mask;
8300 }
8301
8302 if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
8303 bit_clear(cpu_map, 0);
8304 }
8305
8306 cpumap_t primary_map = cpu_map & pset->primary_map;
8307 if (avoid_cpu0) {
8308 primary_map = bit_ror64(primary_map, 1);
8309 }
8310
8311 int rotid = lsb_first(primary_map);
8312 if (rotid >= 0) {
8313 int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
8314
8315 processor_t processor = processor_array[cpuid];
8316
8317 return processor;
8318 }
8319
8320 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
8321 goto out;
8322 }
8323
8324 if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
8325 /* Also avoid cpu1 */
8326 bit_clear(cpu_map, 1);
8327 }
8328
8329 /* Consider secondary processors whose primary is actually running a realtime thread */
8330 cpumap_t secondary_map = cpu_map & ~pset->primary_map & (pset->realtime_map << 1);
8331 if (avoid_cpu0) {
8332 /* Also avoid cpu1 */
8333 secondary_map = bit_ror64(secondary_map, 2);
8334 }
8335 rotid = lsb_first(secondary_map);
8336 if (rotid >= 0) {
8337 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
8338
8339 processor_t processor = processor_array[cpuid];
8340
8341 return processor;
8342 }
8343
8344 /* Consider secondary processors */
8345 secondary_map = cpu_map & ~pset->primary_map;
8346 if (avoid_cpu0) {
8347 /* Also avoid cpu1 */
8348 secondary_map = bit_ror64(secondary_map, 2);
8349 }
8350 rotid = lsb_first(secondary_map);
8351 if (rotid >= 0) {
8352 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
8353
8354 processor_t processor = processor_array[cpuid];
8355
8356 return processor;
8357 }
8358
8359 /*
8360 * I was hoping the compiler would optimize
8361 * this away when avoid_cpu0 is const bool false
8362 * but it still complains about the assignment
8363 * in that case.
8364 */
8365 if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
8366 #if defined(__x86_64__)
8367 avoid_cpu0 = false;
8368 #else
8369 assert(0);
8370 #endif
8371 goto try_again;
8372 }
8373
8374 out:
8375 if (skip_processor) {
8376 return PROCESSOR_NULL;
8377 }
8378
8379 /*
8380 * If we didn't find an obvious processor to choose, but there are still more CPUs
8381 * not already running realtime threads than realtime threads in the realtime run queue,
8382 * this thread belongs in this pset, so choose some other processor in this pset
8383 * to ensure the thread is enqueued here.
8384 */
8385 cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
8386 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
8387 cpu_map = non_realtime_map;
8388 assert(cpu_map != 0);
8389 int cpuid = bit_first(cpu_map);
8390 assert(cpuid >= 0);
8391 return processor_array[cpuid];
8392 }
8393
8394 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
8395 goto skip_secondaries;
8396 }
8397
8398 non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
8399 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
8400 cpu_map = non_realtime_map;
8401 assert(cpu_map != 0);
8402 int cpuid = bit_first(cpu_map);
8403 assert(cpuid >= 0);
8404 return processor_array[cpuid];
8405 }
8406
8407 skip_secondaries:
8408 return PROCESSOR_NULL;
8409 }
8410
8411 /*
8412 * Choose the processor with (1) the lowest priority less than max_pri and (2) the furthest deadline for that priority.
8413 * If all available processors are at max_pri, choose the furthest deadline that is greater than minimum_deadline.
8414 *
8415 * pset is locked.
8416 */
8417 static processor_t
8418 choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus)
8419 {
8420 uint64_t furthest_deadline = deadline_add(minimum_deadline, rt_deadline_epsilon);
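/* Seed the search rt_deadline_epsilon past minimum_deadline, so a processor already at max_pri is only chosen when its deadline is more than rt_deadline_epsilon beyond minimum_deadline */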
8421 processor_t fd_processor = PROCESSOR_NULL;
8422 int lowest_priority = max_pri;
8423
8424 cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask;
8425 if (skip_processor) {
8426 bit_clear(cpu_map, skip_processor->cpu_id);
8427 }
8428 if (skip_spills) {
8429 cpu_map &= ~pset->rt_pending_spill_cpu_mask;
8430 }
8431
8432 for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
8433 processor_t processor = processor_array[cpuid];
8434
8435 if (processor->current_pri > lowest_priority) {
8436 continue;
8437 }
8438
8439 if (processor->current_pri < lowest_priority) {
8440 lowest_priority = processor->current_pri;
8441 furthest_deadline = processor->deadline;
8442 fd_processor = processor;
8443 continue;
8444 }
8445
8446 if (processor->deadline > furthest_deadline) {
8447 furthest_deadline = processor->deadline;
8448 fd_processor = processor;
8449 }
8450 }
8451
8452 if (fd_processor) {
8453 return fd_processor;
8454 }
8455
8456 /*
8457 * There is a race condition possible when there are multiple processor sets.
8458 * choose_processor() takes pset lock A, sees the pending_AST_URGENT_cpu_mask set for a processor in that set and finds no suitable candidate CPU,
8459 * so it drops pset lock A and tries to take pset lock B. Meanwhile the pending_AST_URGENT_cpu_mask CPU is looking for a thread to run and holds
8460 * pset lock B. It doesn't find any threads (because the candidate thread isn't yet on any run queue), so drops lock B, takes lock A again to clear
8461 * the pending_AST_URGENT_cpu_mask bit, and keeps running the current (far deadline) thread. choose_processor() now has lock B and can only find
8462 * the lowest count processor in set B so enqueues it on set B's run queue but doesn't IPI anyone. (The lowest count includes all threads,
8463 * near and far deadlines, so will prefer a low count of earlier deadlines to a high count of far deadlines, which is suboptimal for EDF scheduling.
8464 * To make a better choice we would need to know how many threads with earlier deadlines than the candidate thread exist on each pset's run queue.
8465 * But even if we chose the better run queue, we still wouldn't send an IPI in this case.)
8466 *
8467 * The mitigation is to also look for suitable CPUs that have their pending_AST_URGENT_cpu_mask bit set where there are no earlier deadline threads
8468 * on the run queue of that pset.
8469 */
8470 if (include_ast_urgent_pending_cpus && (rt_runq_earliest_deadline(pset) > furthest_deadline)) {
8471 cpu_map = pset_available_cpumap(pset) & pset->pending_AST_URGENT_cpu_mask;
8472 assert(skip_processor == PROCESSOR_NULL);
8473 assert(skip_spills == false);
8474
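/* Re-run the same lowest-priority / furthest-deadline scan, this time over the CPUs with pending urgent ASTs */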
8475 for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
8476 processor_t processor = processor_array[cpuid];
8477
8478 if (processor->current_pri > lowest_priority) {
8479 continue;
8480 }
8481
8482 if (processor->current_pri < lowest_priority) {
8483 lowest_priority = processor->current_pri;
8484 furthest_deadline = processor->deadline;
8485 fd_processor = processor;
8486 continue;
8487 }
8488
8489 if (processor->deadline > furthest_deadline) {
8490 furthest_deadline = processor->deadline;
8491 fd_processor = processor;
8492 }
8493 }
8494 }
8495
8496 return fd_processor;
8497 }
8498
8499 /* pset is locked */
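/*
 * First look for a processor not already running (or committed to) a realtime thread;
 * failing that, fall back to the lowest-priority / furthest-deadline processor at or below max_pri.
 */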
8500 static processor_t
8501 choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries)
8502 {
8503 bool skip_spills = true;
8504 bool include_ast_urgent_pending_cpus = false;
8505
8506 processor_t next_processor = choose_processor_for_realtime_thread(pset, skip_processor, consider_secondaries, skip_spills);
8507 if (next_processor != PROCESSOR_NULL) {
8508 return next_processor;
8509 }
8510
8511 next_processor = choose_furthest_deadline_processor_for_realtime_thread(pset, max_pri, minimum_deadline, skip_processor, skip_spills, include_ast_urgent_pending_cpus);
8512 return next_processor;
8513 }
8514
8515 #if defined(__x86_64__)
8516 /* pset is locked */
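/*
 * Returns true when the realtime run queue (plus any requested backup processors)
 * already exceeds the number of available primaries not running realtime threads.
 */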
8517 static bool
8518 all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups)
8519 {
8520 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
8521 int nbackup_cpus = 0;
8522
8523 if (include_backups && rt_runq_is_low_latency(pset)) {
8524 nbackup_cpus = sched_rt_n_backup_processors;
8525 }
8526
8527 cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
8528 if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
8529 bit_clear(cpu_map, 0);
8530 }
8531 return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
8532 }
8533
8534 /* pset is locked */
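/* Same check as above, restricted to the CPUs named in these_map. */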
8535 static bool
8536 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups)
8537 {
8538 int nbackup_cpus = 0;
8539
8540 if (include_backups && rt_runq_is_low_latency(pset)) {
8541 nbackup_cpus = sched_rt_n_backup_processors;
8542 }
8543
8544 cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
8545 return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
8546 }
8547 #endif
8548
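/*
 * Can this processor legitimately take a realtime thread?  A recommended processor always can,
 * except that on x86 cpu0 (and its SMT sibling cpu1) and secondary SMT processors are only used
 * once the remaining eligible processors are already saturated with realtime threads, as governed
 * by the sched_avoid_cpu0 and sched_allow_rt_smt tunables.
 */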
8549 static bool
8550 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup)
8551 {
8552 if (!processor->is_recommended) {
8553 return false;
8554 }
8555 bool ok_to_run_realtime_thread = true;
8556 #if defined(__x86_64__)
8557 bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
8558 if (spill_pending) {
8559 return true;
8560 }
8561 if (processor->cpu_id == 0) {
8562 if (sched_avoid_cpu0 == 1) {
8563 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1, as_backup);
8564 } else if (sched_avoid_cpu0 == 2) {
8565 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, ~0x3, as_backup);
8566 }
8567 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
8568 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2, as_backup);
8569 } else if (processor->processor_primary != processor) {
8570 ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset, as_backup));
8571 }
8572 #else
8573 (void)pset;
8574 (void)processor;
8575 (void)as_backup;
8576 #endif
8577 return ok_to_run_realtime_thread;
8578 }
8579
8580 void
8581 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
8582 {
8583 if (drop_lock) {
8584 pset_unlock(pset);
8585 }
8586 }
8587
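/*
 * Mark (but never clear) the calling thread's NO_SMT flag; passing false leaves any existing
 * flag in place.  No-op on machines without SMT.
 */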
8588 void
8589 thread_set_no_smt(bool set)
8590 {
8591 if (!system_is_SMT) {
8592 /* Not a machine that supports SMT */
8593 return;
8594 }
8595
8596 thread_t thread = current_thread();
8597
8598 spl_t s = splsched();
8599 thread_lock(thread);
8600 if (set) {
8601 thread->sched_flags |= TH_SFLAG_NO_SMT;
8602 }
8603 thread_unlock(thread);
8604 splx(s);
8605 }
8606
8607 bool
8608 thread_get_no_smt(void)
8609 {
8610 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
8611 }
8612
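/* Mark the given task (or the current task if TASK_NULL) as no-SMT.  No-op on machines without SMT. */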
8613 extern void task_set_no_smt(task_t);
8614 void
8615 task_set_no_smt(task_t task)
8616 {
8617 if (!system_is_SMT) {
8618 /* Not a machine that supports SMT */
8619 return;
8620 }
8621
8622 if (task == TASK_NULL) {
8623 task = current_task();
8624 }
8625
8626 task_lock(task);
8627 task->t_flags |= TF_NO_SMT;
8628 task_unlock(task);
8629 }
8630
8631 #if DEBUG || DEVELOPMENT
8632 extern void sysctl_task_set_no_smt(char no_smt);
8633 void
8634 sysctl_task_set_no_smt(char no_smt)
8635 {
8636 if (!system_is_SMT) {
8637 /* Not a machine that supports SMT */
8638 return;
8639 }
8640
8641 task_t task = current_task();
8642
8643 task_lock(task);
8644 if (no_smt == '1') {
8645 task->t_flags |= TF_NO_SMT;
8646 }
8647 task_unlock(task);
8648 }
8649
8650 extern char sysctl_task_get_no_smt(void);
8651 char
8652 sysctl_task_get_no_smt(void)
8653 {
8654 task_t task = current_task();
8655
8656 if (task->t_flags & TF_NO_SMT) {
8657 return '1';
8658 }
8659 return '0';
8660 }
8661 #endif /* DEVELOPMENT || DEBUG */
8662
8663
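/*
 * Bind a thread to a cluster of the requested type ('e'/'E' or 'p'/'P'), optionally as a
 * soft binding; any previous cluster binding is cleared first.  If the target is the calling
 * thread, the new binding takes effect via the thread_block() below.
 */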
8664 __private_extern__ void
8665 thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound)
8666 {
8667 #if __AMP__
8668 spl_t s = splsched();
8669 thread_lock(thread);
8670 thread->sched_flags &= ~(TH_SFLAG_BOUND_SOFT);
8671 thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
8672 if (soft_bound) {
8673 thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
8674 }
8675 switch (cluster_type) {
8676 case 'e':
8677 case 'E':
8678 if (pset0.pset_cluster_type == PSET_AMP_E) {
8679 thread->th_bound_cluster_id = pset0.pset_id;
8680 } else if (pset_node1.psets != PROCESSOR_SET_NULL) {
8681 thread->th_bound_cluster_id = pset_node1.psets->pset_id;
8682 }
8683 break;
8684 case 'p':
8685 case 'P':
8686 if (pset0.pset_cluster_type == PSET_AMP_P) {
8687 thread->th_bound_cluster_id = pset0.pset_id;
8688 } else if (pset_node1.psets != PROCESSOR_SET_NULL) {
8689 thread->th_bound_cluster_id = pset_node1.psets->pset_id;
8690 }
8691 break;
8692 default:
8693 break;
8694 }
8695 thread_unlock(thread);
8696 splx(s);
8697
8698 if (thread == current_thread()) {
8699 thread_block(THREAD_CONTINUE_NULL);
8700 }
8701 #else /* __AMP__ */
8702 (void)thread;
8703 (void)cluster_type;
8704 (void)soft_bound;
8705 #endif /* __AMP__ */
8706 }
8707
8708 extern uint32_t thread_bound_cluster_id(thread_t thread);
8709 uint32_t
8710 thread_bound_cluster_id(thread_t thread)
8711 {
8712 return thread->th_bound_cluster_id;
8713 }
8714
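/*
 * Bind (or, with THREAD_UNBIND, unbind) a thread to the pset identified by cluster_id.
 * THREAD_BIND_SOFT makes it a soft binding, and THREAD_BIND_ELIGIBLE_ONLY rejects the
 * request if the scheduler does not consider the thread eligible for that pset.
 */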
8715 __private_extern__ kern_return_t
8716 thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options)
8717 {
8718 #if __AMP__
8719
8720 processor_set_t pset = NULL;
8721 if (options & (THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY)) {
8722 /* Validate the inputs for the bind case */
8723 int max_clusters = ml_get_cluster_count();
8724 if (cluster_id >= max_clusters) {
8725 /* Invalid cluster id */
8726 return KERN_INVALID_ARGUMENT;
8727 }
8728 pset = pset_array[cluster_id];
8729 if (pset == NULL) {
8730 /* Cluster has not been initialized yet */
8731 return KERN_INVALID_ARGUMENT;
8732 }
8733 if (options & THREAD_BIND_ELIGIBLE_ONLY) {
8734 if (SCHED(thread_eligible_for_pset(thread, pset)) == false) {
8735 /* Thread is not recommended for the cluster type */
8736 return KERN_INVALID_POLICY;
8737 }
8738 }
8739 }
8740
8741 if (options & THREAD_UNBIND) {
8742 /* If the thread was actually not bound to some cluster, nothing to do here */
8743 if (thread_bound_cluster_id(thread) == THREAD_BOUND_CLUSTER_NONE) {
8744 return KERN_SUCCESS;
8745 }
8746 }
8747
8748 spl_t s = splsched();
8749 thread_lock(thread);
8750
8751 /* Unbind the thread from its previous bound state */
8752 thread->sched_flags &= ~(TH_SFLAG_BOUND_SOFT);
8753 thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
8754
8755 if (options & THREAD_UNBIND) {
8756 /* Nothing more to do here */
8757 goto thread_bind_cluster_complete;
8758 }
8759
8760 if (options & THREAD_BIND_SOFT) {
8761 thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
8762 }
8763 thread->th_bound_cluster_id = cluster_id;
8764
8765 thread_bind_cluster_complete:
8766 thread_unlock(thread);
8767 splx(s);
8768
8769 if (thread == current_thread()) {
8770 thread_block(THREAD_CONTINUE_NULL);
8771 }
8772 #else /* __AMP__ */
8773 (void)thread;
8774 (void)cluster_id;
8775 (void)options;
8776 #endif /* __AMP__ */
8777 return KERN_SUCCESS;
8778 }
8779
8780 #if DEVELOPMENT || DEBUG
8781 extern int32_t sysctl_get_bound_cpuid(void);
8782 int32_t
8783 sysctl_get_bound_cpuid(void)
8784 {
8785 int32_t cpuid = -1;
8786 thread_t self = current_thread();
8787
8788 processor_t processor = self->bound_processor;
8789 if (processor == NULL) {
8790 cpuid = -1;
8791 } else {
8792 cpuid = processor->cpu_id;
8793 }
8794
8795 return cpuid;
8796 }
8797
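/*
 * Hard-bind the calling thread to the processor with the given cpu id, or unbind it when
 * cpuid == -1 (thread_bind(PROCESSOR_NULL) clears any existing binding).
 */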
8798 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
8799 kern_return_t
8800 sysctl_thread_bind_cpuid(int32_t cpuid)
8801 {
8802 processor_t processor = PROCESSOR_NULL;
8803
8804 if (cpuid == -1) {
8805 goto unbind;
8806 }
8807
8808 if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
8809 return KERN_INVALID_VALUE;
8810 }
8811
8812 processor = processor_array[cpuid];
8813 if (processor == PROCESSOR_NULL) {
8814 return KERN_INVALID_VALUE;
8815 }
8816
8817 #if __AMP__
8818
8819 thread_t thread = current_thread();
8820
8821 if (thread->th_bound_cluster_id != THREAD_BOUND_CLUSTER_NONE) {
8822 if ((thread->sched_flags & TH_SFLAG_BOUND_SOFT) == 0) {
8823 /* Cannot hard-bind an already hard-cluster-bound thread */
8824 return KERN_NOT_SUPPORTED;
8825 }
8826 }
8827
8828 #endif /* __AMP__ */
8829
8830 unbind:
8831 thread_bind(processor);
8832
8833 thread_block(THREAD_CONTINUE_NULL);
8834 return KERN_SUCCESS;
8835 }
8836
8837 extern char sysctl_get_task_cluster_type(void);
8838 char
8839 sysctl_get_task_cluster_type(void)
8840 {
8841 task_t task = current_task();
8842 processor_set_t pset_hint = task->pset_hint;
8843
8844 if (!pset_hint) {
8845 return '0';
8846 }
8847
8848 #if __AMP__
8849 if (pset_hint->pset_cluster_type == PSET_AMP_E) {
8850 return 'E';
8851 } else if (pset_hint->pset_cluster_type == PSET_AMP_P) {
8852 return 'P';
8853 }
8854 #endif
8855
8856 return '0';
8857 }
8858
8859 #if __AMP__
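/*
 * Walk the pset nodes looking for one of the requested cluster type, preferring a pset that
 * still has recommended processors; otherwise return the last pset examined in that node.
 */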
8860 static processor_set_t
8861 find_pset_of_type(pset_cluster_type_t t)
8862 {
8863 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
8864 if (node->pset_cluster_type != t) {
8865 continue;
8866 }
8867
8868 processor_set_t pset = PROCESSOR_SET_NULL;
8869 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
8870 pset = pset_array[pset_id];
8871 /* Prefer one with recommended processors */
8872 if (pset->recommended_bitmask != 0) {
8873 assert(pset->pset_cluster_type == t);
8874 return pset;
8875 }
8876 }
8877 /* Otherwise return whatever was found last */
8878 return pset;
8879 }
8880
8881 return PROCESSOR_SET_NULL;
8882 }
8883 #endif
8884
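/*
 * Point the current task's pset hint at an E or P cluster and mark the task to honor that hint;
 * the change is picked up for the calling thread at the thread_block() below.
 */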
8885 extern void sysctl_task_set_cluster_type(char cluster_type);
8886 void
8887 sysctl_task_set_cluster_type(char cluster_type)
8888 {
8889 task_t task = current_task();
8890 processor_set_t pset_hint = PROCESSOR_SET_NULL;
8891
8892 #if __AMP__
8893 switch (cluster_type) {
8894 case 'e':
8895 case 'E':
8896 pset_hint = find_pset_of_type(PSET_AMP_E);
8897 break;
8898 case 'p':
8899 case 'P':
8900 pset_hint = find_pset_of_type(PSET_AMP_P);
8901 break;
8902 default:
8903 break;
8904 }
8905
8906 if (pset_hint) {
8907 task_lock(task);
8908 task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
8909 task->pset_hint = pset_hint;
8910 task_unlock(task);
8911
8912 thread_block(THREAD_CONTINUE_NULL);
8913 }
8914 #else
8915 (void)cluster_type;
8916 (void)task;
8917 (void)pset_hint;
8918 #endif
8919 }
8920
8921 /*
8922 * The quantum length used for the Fixed and RT sched modes. In general the quantum
8923 * can vary, for example for background or QoS threads.
8924 */
8925 extern uint64_t sysctl_get_quantum_us(void);
8926 uint64_t
8927 sysctl_get_quantum_us(void)
8928 {
8929 uint32_t quantum;
8930 uint64_t quantum_ns;
8931
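	/* initial_quantum_size() reports abstime units; convert to nanoseconds, then return microseconds */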
8932 quantum = SCHED(initial_quantum_size)(THREAD_NULL);
8933 absolutetime_to_nanoseconds(quantum, &quantum_ns);
8934
8935 return quantum_ns / 1000;
8936 }
8937
8938 #endif /* DEVELOPMENT || DEBUG */
8939