1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/limits.h>
79 #include <machine/atomic.h>
80
81 #include <machine/commpage.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/cpu_number.h>
87 #include <kern/cpu_data.h>
88 #include <kern/smp.h>
89 #include <kern/smr.h>
90 #include <kern/debug.h>
91 #include <kern/macro_help.h>
92 #include <kern/machine.h>
93 #include <kern/misc_protos.h>
94 #include <kern/monotonic.h>
95 #include <kern/processor.h>
96 #include <kern/queue.h>
97 #include <kern/recount.h>
98 #include <kern/restartable.h>
99 #include <kern/sched.h>
100 #include <kern/sched_prim.h>
101 #include <kern/sched_rt.h>
102 #include <kern/sfi.h>
103 #include <kern/syscall_subr.h>
104 #include <kern/task.h>
105 #include <kern/thread.h>
106 #include <kern/thread_group.h>
107 #include <kern/ledger.h>
108 #include <kern/timer_queue.h>
109 #include <kern/waitq.h>
110 #include <kern/policy_internal.h>
111
112 #include <vm/pmap.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <vm/vm_pageout_xnu.h>
116
117 #include <mach/sdt.h>
118 #include <mach/mach_host.h>
119 #include <mach/host_info.h>
120
121 #include <sys/kdebug.h>
122 #include <kperf/kperf.h>
123 #include <kern/kpc.h>
124 #include <san/kasan.h>
125 #include <kern/pms.h>
126 #include <kern/host.h>
127 #include <stdatomic.h>
128 #include <os/atomic_private.h>
129 #include <os/log.h>
130
131 #ifdef KDBG_MACOS_RELEASE
132 #define KTRC KDBG_MACOS_RELEASE
133 #else
134 #define KTRC KDBG_RELEASE
135 #endif
136
137
138 struct sched_statistics PERCPU_DATA(sched_stats);
139 bool sched_stats_active;
140
141 TUNABLE(bool, cpulimit_affects_quantum, "cpulimit_affects_quantum", true);
142
143 TUNABLE(uint32_t, nonurgent_preemption_timer_us, "nonurgent_preemption_timer", 50); /* microseconds */
144 static uint64_t nonurgent_preemption_timer_abs = 0;
145
146 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
147 TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);
148
149 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
150 TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);
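/*
 * Illustrative only: "preempt" and "bg_preempt" select the preemption rate
 * in Hz, and sched_timeshare_init() derives the timeshare quanta from them
 * as quantum_us = 1,000,000 / rate.  For example, a hypothetical override of
 *
 *	nvram boot-args="preempt=250"
 *
 * would select a 4 ms standard quantum instead of the default 10 ms.
 */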
151
152 #if XNU_TARGET_OS_XR
153 #define MAX_UNSAFE_RT_QUANTA 1
154 #define SAFE_RT_MULTIPLIER 5
155 #else
156 #define MAX_UNSAFE_RT_QUANTA 100
157 #define SAFE_RT_MULTIPLIER 2
158 #endif /* XNU_TARGET_OS_XR */
159
160 #define MAX_UNSAFE_FIXED_QUANTA 100
161 #define SAFE_FIXED_MULTIPLIER SAFE_RT_MULTIPLIER
162
163 TUNABLE_DEV_WRITEABLE(int, max_unsafe_rt_quanta, "max_unsafe_rt_quanta", MAX_UNSAFE_RT_QUANTA);
164 TUNABLE_DEV_WRITEABLE(int, max_unsafe_fixed_quanta, "max_unsafe_fixed_quanta", MAX_UNSAFE_FIXED_QUANTA);
165
166 TUNABLE_DEV_WRITEABLE(int, safe_rt_multiplier, "safe_rt_multiplier", SAFE_RT_MULTIPLIER);
167 TUNABLE_DEV_WRITEABLE(int, safe_fixed_multiplier, "safe_fixed_multiplier", SAFE_FIXED_MULTIPLIER);
168
169 #define MAX_POLL_QUANTA 2
170 TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);
171
172 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
173 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
174
175 uint64_t max_poll_computation;
176
177 uint64_t max_unsafe_rt_computation;
178 uint64_t max_unsafe_fixed_computation;
179 uint64_t sched_safe_rt_duration;
180 uint64_t sched_safe_fixed_duration;
181
182 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
183
184 uint32_t std_quantum;
185 uint32_t min_std_quantum;
186 uint32_t bg_quantum;
187
188 uint32_t std_quantum_us;
189 uint32_t bg_quantum_us;
190
191 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
192
193 uint32_t thread_depress_time;
194 uint32_t default_timeshare_computation;
195 uint32_t default_timeshare_constraint;
196
197
198 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
199
200 _Atomic uint32_t sched_tick;
201 uint32_t sched_tick_interval;
202
203 /* Timeshare load calculation interval (15ms) */
204 uint32_t sched_load_compute_interval_us = 15000;
205 uint64_t sched_load_compute_interval_abs;
206 static _Atomic uint64_t sched_load_compute_deadline;
207
208 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
209 uint32_t sched_fixed_shift;
210
211 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
212
213 /* Allow foreground to decay past default to resolve inversions */
214 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
215 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
216
217 /* Defaults for timer deadline profiling */
218 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
219 * 2ms */
220 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
221 * <= 5ms */
222
223 uint64_t timer_deadline_tracking_bin_1;
224 uint64_t timer_deadline_tracking_bin_2;
225
226 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
227
228 thread_t sched_maintenance_thread;
229
230 LCK_GRP_DECLARE(cluster_powerdown_grp, "cluster_powerdown");
231 LCK_MTX_DECLARE(cluster_powerdown_lock, &cluster_powerdown_grp);
232
233 /* interrupts disabled lock to guard core online, recommendation, pcs state, scheduling policy bits */
234 decl_simple_lock_data(, sched_available_cores_lock);
235
236 /*
237 * Locked by sched_available_cores_lock.
238 * cluster_powerdown_lock is held while making changes to CPU offline state.
239 */
240 static struct global_powered_cores_state {
241 /*
242 * Set when PCS has seen all cores boot up and is ready to manage online
243 * state. CPU recommendation works before this point.
244 */
245 bool pcs_init_completed;
246
247 cpumap_t pcs_managed_cores; /* all cores managed by the PCS */
248
249 /*
250 * Inputs for CPU offline state provided by clients
251 */
252 cpumap_t pcs_requested_online_user; /* updated by processor_start/exit from userspace */
253 cpumap_t pcs_requested_online_clpc_user;
254 cpumap_t pcs_requested_online_clpc_system;
255 cpumap_t pcs_required_online_pmgr; /* e.g. ANE needs these powered for their rail to be happy */
256 cpumap_t pcs_required_online_system; /* e.g. smt1 for interrupts, and the boot processor unless a boot-arg is set; these cores disable instead of sleeping */
257
258 /*
259 * When a suspend count is held, all CPUs must be powered up.
260 */
261 int32_t pcs_powerdown_suspend_count;
262
263 /*
264 * Disable automatic cluster powerdown in favor of explicit user core online control
265 */
266 bool pcs_user_online_core_control;
267 bool pcs_wants_kernel_sleep;
268 bool pcs_in_kernel_sleep;
269
270 struct powered_cores_state {
271 /*
272 * The input to the recommendation computation, provided by the update-powered-cores path.
273 */
274 cpumap_t pcs_powerdown_recommended_cores;
275
276 /*
277 * These cores are online and are not powered down.
278 *
279 * Processors with processor->processor_online bit set.
280 */
281 cpumap_t pcs_online_cores;
282
283 /*
284 * These cores are disabled or powered down for temporary reasons
285 * and will come back when sufficient load is presented, so the user
286 * should still see them as active in the CPU count.
287 *
288 * Processors with processor->shutdown_temporary bit set.
289 */
290 cpumap_t pcs_tempdown_cores;
291 } pcs_effective;
292
293 /* The 'goal state' PCS has computed and is attempting to apply */
294 struct powered_cores_state pcs_requested;
295
296 /*
297 * Inputs into CPU recommended cores provided by clients.
298 * Note that these may be changed under the available cores lock and
299 * become effective while sched_update_powered_cores_drops_lock is in
300 * the middle of making changes to CPU online state.
301 */
302
303 cpumap_t pcs_requested_recommended_clpc;
304 cpumap_t pcs_requested_recommended_clpc_system;
305 cpumap_t pcs_requested_recommended_clpc_user;
306 bool pcs_recommended_clpc_failsafe_active;
307 bool pcs_sleep_override_recommended;
308
309 /*
310 * These cores are recommended and can be used for execution
311 * of non-bound threads.
312 *
313 * Processors with processor->is_recommended bit set.
314 */
315 cpumap_t pcs_recommended_cores;
316
317 /*
318 * These are for the debugger.
319 * Use volatile to stop the compiler from optimizing out the stores
320 */
321 volatile processor_reason_t pcs_in_flight_reason;
322 volatile processor_reason_t pcs_previous_reason;
323 } pcs = {
324 /*
325 * Powerdown is suspended during boot until after all CPUs finish booting,
326 * released by sched_cpu_init_completed.
327 */
328 .pcs_powerdown_suspend_count = 1,
329 .pcs_requested_online_user = ALL_CORES_POWERED,
330 .pcs_requested_online_clpc_user = ALL_CORES_POWERED,
331 .pcs_requested_online_clpc_system = ALL_CORES_POWERED,
332 .pcs_in_flight_reason = REASON_NONE,
333 .pcs_previous_reason = REASON_NONE,
334 .pcs_requested.pcs_powerdown_recommended_cores = ALL_CORES_POWERED,
335 .pcs_requested_recommended_clpc = ALL_CORES_RECOMMENDED,
336 .pcs_requested_recommended_clpc_system = ALL_CORES_RECOMMENDED,
337 .pcs_requested_recommended_clpc_user = ALL_CORES_RECOMMENDED,
338 };
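/*
 * Rough sketch (illustrative only; the authoritative logic lives in
 * sched_update_recommended_cores_locked(), which also handles the CLPC
 * failsafe and sleep overrides): the effective recommendation is roughly
 * the intersection of the client inputs, masked to the online cores, e.g.
 *
 *	cpumap_t recommended = pcs.pcs_requested_recommended_clpc &
 *	    pcs.pcs_requested_recommended_clpc_system &
 *	    pcs.pcs_requested_recommended_clpc_user &
 *	    pcs.pcs_effective.pcs_online_cores;
 */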
339
340 uint64_t sysctl_sched_recommended_cores = ALL_CORES_RECOMMENDED;
341
342 static int sched_last_resort_cpu(void);
343
344 static void sched_update_recommended_cores_locked(processor_reason_t reason, cpumap_t core_going_offline);
345 static void sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t s);
346
347 #if __arm64__
348 static void sched_recommended_cores_maintenance(void);
349 uint64_t perfcontrol_failsafe_starvation_threshold;
350 extern char *proc_name_address(struct proc *p);
351 #endif /* __arm64__ */
352
353 uint64_t sched_one_second_interval;
354 boolean_t allow_direct_handoff = TRUE;
355
356 /* Forwards */
357
358 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
359
360 static void load_shift_init(void);
361 static void preempt_pri_init(void);
362
363 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
364
365 thread_t processor_idle(
366 thread_t thread,
367 processor_t processor);
368
369 static ast_t
370 csw_check_locked(
371 thread_t thread,
372 processor_t processor,
373 processor_set_t pset,
374 ast_t check_reason);
375
376 static void processor_setrun(
377 processor_t processor,
378 thread_t thread,
379 sched_options_t options);
380
381 static void
382 sched_timer_deadline_tracking_init(void);
383
384 #if DEBUG
385 extern int debug_task;
386 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
387 #else
388 #define TLOG(a, fmt, args...) do {} while (0)
389 #endif
390
391 static processor_t
392 thread_bind_internal(
393 thread_t thread,
394 processor_t processor);
395
396 static void
397 sched_vm_group_maintenance(void);
398
399 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
400 int8_t sched_load_shifts[NRQS];
401 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
402 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
403
404 /*
405 * Statically allocate a buffer to hold the longest possible
406 * scheduler description string, as currently implemented.
407 * bsd/kern/kern_sysctl.c has a corresponding definition used to
408 * export this to userspace via sysctl(3). If either version
409 * changes, update the other.
410 *
411 * Note that in addition to being an upper bound on the strings
412 * in the kernel, it's also an exact parameter to PE_get_default(),
413 * which interrogates the device tree on some platforms. That
414 * API requires the caller know the exact size of the device tree
415 * property, so we need both a legacy size (32) and the current size
416 * (48) to deal with old and new device trees. The device tree property
417 * is similarly padded to a fixed size so that the same kernel image
418 * can run on multiple devices with different schedulers configured
419 * in the device tree.
420 */
421 char sched_string[SCHED_STRING_MAX_LENGTH];
422
423 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
424
425 /* Global flag which indicates whether Background Stepper Context is enabled */
426 static int cpu_throttle_enabled = 1;
427
428 #if DEVELOPMENT || DEBUG
429 int enable_task_set_cluster_type = 0;
430 bool system_ecore_only = false;
431 #endif /* DEVELOPMENT || DEBUG */
432
433 void
434 sched_init(void)
435 {
436 boolean_t direct_handoff = FALSE;
437 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
438
439 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
440 /* No boot-args, check in device tree */
441 if (!PE_get_default("kern.sched_pri_decay_limit",
442 &sched_pri_decay_band_limit,
443 sizeof(sched_pri_decay_band_limit))) {
444 /* Allow decay all the way to normal limits */
445 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
446 }
447 }
448
449 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
450
451 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
452 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
453 }
454 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
455
456 #if __arm64__
457 clock_interval_to_absolutetime_interval(expecting_ipi_wfe_timeout_usec, NSEC_PER_USEC, &expecting_ipi_wfe_timeout_mt);
458 #endif /* __arm64__ */
459
460 SCHED(init)();
461 sched_timer_deadline_tracking_init();
462 SCHED(pset_init)(&pset0);
463 SCHED(rt_init_pset)(&pset0);
464 SCHED(processor_init)(master_processor);
465
466 if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
467 allow_direct_handoff = direct_handoff;
468 }
469
470 #if DEVELOPMENT || DEBUG
471 if (PE_parse_boot_argn("enable_skstsct", &enable_task_set_cluster_type, sizeof(enable_task_set_cluster_type))) {
472 system_ecore_only = (enable_task_set_cluster_type == 2);
473 }
474 #endif /* DEVELOPMENT || DEBUG */
475 }
476
477 void
478 sched_timebase_init(void)
479 {
480 uint64_t abstime;
481
482 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
483 sched_one_second_interval = abstime;
484
485 SCHED(timebase_init)();
486 sched_realtime_timebase_init();
487 }
488
489 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
490
491 void
492 sched_timeshare_init(void)
493 {
494 /*
495 * Calculate the timeslicing quantum
496 * in us.
497 */
498 if (default_preemption_rate < 1) {
499 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
500 }
501 std_quantum_us = (1000 * 1000) / default_preemption_rate;
502
503 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
504
505 if (default_bg_preemption_rate < 1) {
506 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
507 }
508 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
509
510 printf("standard background quantum is %d us\n", bg_quantum_us);
511
512 load_shift_init();
513 preempt_pri_init();
514 os_atomic_store(&sched_tick, 0, relaxed);
515 }
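/*
 * Worked example with the default rates (illustrative only):
 *
 *	std_quantum_us = 1,000,000 / 100 = 10,000 us (10 ms)
 *	bg_quantum_us  = 1,000,000 / 400 =  2,500 us (2.5 ms)
 */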
516
517 void
518 sched_set_max_unsafe_rt_quanta(int max)
519 {
520 const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
521
522 max_unsafe_rt_computation = ((uint64_t)max) * quantum_size;
523
524 const int mult = safe_rt_multiplier <= 0 ? 2 : safe_rt_multiplier;
525 sched_safe_rt_duration = mult * ((uint64_t)max) * quantum_size;
526
527
528 #if DEVELOPMENT || DEBUG
529 max_unsafe_rt_quanta = max;
530 #else
531 /*
532 * On RELEASE kernels, this is only called on boot where
533 * max is already equal to max_unsafe_rt_quanta.
534 */
535 assert3s(max, ==, max_unsafe_rt_quanta);
536 #endif
537 }
538
539 void
540 sched_set_max_unsafe_fixed_quanta(int max)
541 {
542 const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
543
544 max_unsafe_fixed_computation = ((uint64_t)max) * quantum_size;
545
546 const int mult = safe_fixed_multiplier <= 0 ? 2 : safe_fixed_multiplier;
547 sched_safe_fixed_duration = mult * ((uint64_t)max) * quantum_size;
548
549 #if DEVELOPMENT || DEBUG
550 max_unsafe_fixed_quanta = max;
551 #else
552 /*
553 * On RELEASE kernels, this is only called on boot where
554 * max is already equal to max_unsafe_fixed_quanta.
555 */
556 assert3s(max, ==, max_unsafe_fixed_quanta);
557 #endif
558 }
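/*
 * Worked example (illustrative only, assuming a 10 ms initial quantum and
 * the non-XR defaults of 100 quanta with a safe multiplier of 2):
 *
 *	max_unsafe_fixed_computation = 100 * 10 ms = 1 s
 *	sched_safe_fixed_duration    = 2 * 100 * 10 ms = 2 s
 *
 * Roughly speaking, a fixed-priority thread that stays compute-bound past
 * the first figure trips the fail-safe, and normal behavior is restored
 * after the second.
 */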
559
560 uint64_t
561 sched_get_quantum_us(void)
562 {
563 uint32_t quantum = SCHED(initial_quantum_size)(THREAD_NULL);
564
565 uint64_t quantum_ns;
566 absolutetime_to_nanoseconds(quantum, &quantum_ns);
567
568 return quantum_ns / 1000;
569 }
570
571 void
572 sched_timeshare_timebase_init(void)
573 {
574 uint64_t abstime;
575 uint32_t shift;
576
577 /* standard timeslicing quantum */
578 clock_interval_to_absolutetime_interval(
579 std_quantum_us, NSEC_PER_USEC, &abstime);
580 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
581 std_quantum = (uint32_t)abstime;
582
583 /* smallest remaining quantum (250 us) */
584 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
585 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
586 min_std_quantum = (uint32_t)abstime;
587
588 /* quantum for background tasks */
589 clock_interval_to_absolutetime_interval(
590 bg_quantum_us, NSEC_PER_USEC, &abstime);
591 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
592 bg_quantum = (uint32_t)abstime;
593
594 /* scheduler tick interval */
595 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
596 NSEC_PER_USEC, &abstime);
597 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
598 sched_tick_interval = (uint32_t)abstime;
599
600 /* timeshare load calculation interval & deadline initialization */
601 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
602 os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);
603
604 /*
605 * Compute conversion factor from usage to
606 * timesharing priorities with 5/8 ** n aging.
607 */
608 abstime = (abstime * 5) / 3;
609 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
610 abstime >>= 1;
611 }
612 sched_fixed_shift = shift;
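	/*
	 * Equivalently (illustrative restatement of the loop above):
	 * sched_fixed_shift is the smallest shift such that
	 *
	 *	((sched_tick_interval * 5) / 3) >> sched_fixed_shift <= BASEPRI_DEFAULT
	 */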
613
614 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
615 sched_pri_shifts[i] = INT8_MAX;
616 }
617
618 sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
619 sched_set_max_unsafe_fixed_quanta(max_unsafe_fixed_quanta);
620
621 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
622 thread_depress_time = 1 * std_quantum;
623 default_timeshare_computation = std_quantum / 2;
624 default_timeshare_constraint = std_quantum;
625
626 #if __arm64__
627 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
628 #endif /* __arm64__ */
629
630 if (nonurgent_preemption_timer_us) {
631 clock_interval_to_absolutetime_interval(nonurgent_preemption_timer_us, NSEC_PER_USEC, &abstime);
632 nonurgent_preemption_timer_abs = abstime;
633 }
634 }
635
636 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
637
638 void
639 sched_check_spill(processor_set_t pset, thread_t thread)
640 {
641 (void)pset;
642 (void)thread;
643
644 return;
645 }
646
647 bool
648 sched_thread_should_yield(processor_t processor, thread_t thread)
649 {
650 (void)thread;
651
652 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
653 }
654
655 /* Default implementations of .steal_thread_enabled */
656 bool
657 sched_steal_thread_DISABLED(processor_set_t pset)
658 {
659 (void)pset;
660 return false;
661 }
662
663 bool
664 sched_steal_thread_enabled(processor_set_t pset)
665 {
666 return bit_count(pset->node->pset_map) > 1;
667 }
668
669 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
670
671 /*
672 * Set up values for timeshare
673 * loading factors.
674 */
675 static void
676 load_shift_init(void)
677 {
678 int8_t k, *p = sched_load_shifts;
679 uint32_t i, j;
680
681 uint32_t sched_decay_penalty = 1;
682
683 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
684 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
685 }
686
687 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
688 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
689 }
690
691 if (sched_decay_penalty == 0) {
692 /*
693 * There is no penalty for timeshare threads for using too much
694 * CPU, so set all load shifts to INT8_MIN. Even under high load,
695 * sched_pri_shift will be >INT8_MAX, and there will be no
696 * penalty applied to threads (nor will sched_usage be updated per
697 * thread).
698 */
699 for (i = 0; i < NRQS; i++) {
700 sched_load_shifts[i] = INT8_MIN;
701 }
702
703 return;
704 }
705
706 *p++ = INT8_MIN; *p++ = 0;
707
708 /*
709 * For a given system load "i", the per-thread priority
710 * penalty per quantum of CPU usage is ~2^k priority
711 * levels. "sched_decay_penalty" can cause more
712 * array entries to be filled with smaller "k" values
713 */
714 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
715 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
716 *p++ = k;
717 }
718 }
719 }
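/*
 * Worked example (illustrative only): with the default sched_decay_penalty
 * of 1, the loop above fills the table as
 *
 *	sched_load_shifts[0]      = INT8_MIN
 *	sched_load_shifts[1]      = 0
 *	sched_load_shifts[2..3]   = 1
 *	sched_load_shifts[4..7]   = 2
 *	sched_load_shifts[8..15]  = 3
 *	sched_load_shifts[16..31] = 4
 *	...
 *
 * i.e. approximately log2(load), so the per-quantum priority penalty grows
 * slowly with the runnable load.
 */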
720
721 static void
722 preempt_pri_init(void)
723 {
724 bitmap_t *p = sched_preempt_pri;
725
726 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
727 bitmap_set(p, i);
728 }
729
730 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
731 bitmap_set(p, i);
732 }
733 }
734
735 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
736
737 void
738 check_monotonic_time(uint64_t ctime)
739 {
740 processor_t processor = current_processor();
741 uint64_t last_dispatch = processor->last_dispatch;
742
743 if (last_dispatch > ctime) {
744 panic("Non-monotonic time: last_dispatch at 0x%llx, ctime 0x%llx",
745 last_dispatch, ctime);
746 }
747 }
748
749
750 /*
751 * Thread wait timer expiration.
752 * Runs in timer interrupt context with interrupts disabled.
753 */
754 void
755 thread_timer_expire(void *p0, __unused void *p1)
756 {
757 thread_t thread = (thread_t)p0;
758
759 assert_thread_magic(thread);
760
761 assert(ml_get_interrupts_enabled() == FALSE);
762
763 thread_lock(thread);
764
765 if (thread->wait_timer_armed) {
766 thread->wait_timer_armed = false;
767 clear_wait_internal(thread, THREAD_TIMED_OUT);
768 /* clear_wait_internal may have dropped and retaken the thread lock */
769 }
770
771 thread->wait_timer_active--;
772
773 thread_unlock(thread);
774 }
775
776 /*
777 * thread_unblock:
778 *
779 * Unblock thread on wake up.
780 *
781 * Returns TRUE if the thread should now be placed on the runqueue.
782 *
783 * Thread must be locked.
784 *
785 * Called at splsched().
786 */
787 boolean_t
788 thread_unblock(
789 thread_t thread,
790 wait_result_t wresult)
791 {
792 boolean_t ready_for_runq = FALSE;
793 thread_t cthread = current_thread();
794 uint32_t new_run_count;
795 int old_thread_state;
796
797 /*
798 * Set wait_result.
799 */
800 thread->wait_result = wresult;
801
802 /*
803 * Cancel pending wait timer.
804 */
805 if (thread->wait_timer_armed) {
806 if (timer_call_cancel(thread->wait_timer)) {
807 thread->wait_timer_active--;
808 }
809 thread->wait_timer_armed = false;
810 }
811
812 boolean_t aticontext, pidle;
813 ml_get_power_state(&aticontext, &pidle);
814
815 /*
816 * Update scheduling state: not waiting,
817 * set running.
818 */
819 old_thread_state = thread->state;
820 thread->state = (old_thread_state | TH_RUN) &
821 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT | TH_WAKING);
822
823 if ((old_thread_state & TH_RUN) == 0) {
824 uint64_t ctime = mach_approximate_time();
825
826 check_monotonic_time(ctime);
827
828 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
829 timer_start(&thread->runnable_timer, ctime);
830
831 ready_for_runq = TRUE;
832
833 if (old_thread_state & TH_WAIT_REPORT) {
834 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
835 }
836
837 /* Update the runnable thread count */
838 new_run_count = SCHED(run_count_incr)(thread);
839
840 #if CONFIG_SCHED_AUTO_JOIN
841 if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
842 work_interval_auto_join_propagate(cthread, thread);
843 }
844 #endif /*CONFIG_SCHED_AUTO_JOIN */
845
846 } else {
847 /*
848 * Either the thread is idling in place on another processor,
849 * or it hasn't finished context switching yet.
850 */
851 assert((thread->state & TH_IDLE) == 0);
852 /*
853 * The run count is only dropped after the context switch completes
854 * and the thread is still waiting, so we should not run_incr here
855 */
856 new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
857 }
858
859 /*
860 * Calculate deadline for real-time threads.
861 */
862 if (thread->sched_mode == TH_MODE_REALTIME) {
863 uint64_t ctime = mach_absolute_time();
864 thread->realtime.deadline = thread->realtime.constraint + ctime;
865 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
866 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
867 }
868
869 /*
870 * Clear old quantum, fail-safe computation, etc.
871 */
872 thread->quantum_remaining = 0;
873 thread->computation_metered = 0;
874 thread->reason = AST_NONE;
875 thread->block_hint = kThreadWaitNone;
876
877 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
878 * We also account for "double hop" thread signaling via
879 * the thread callout infrastructure.
880 * DRK: consider removing the callout wakeup counters in the future;
881 * they're present for verification at the moment.
882 */
883
884 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
885 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, current_proc());
886
887 uint64_t ttd = current_processor()->timer_call_ttd;
888
889 if (ttd) {
890 if (ttd <= timer_deadline_tracking_bin_1) {
891 thread->thread_timer_wakeups_bin_1++;
892 } else if (ttd <= timer_deadline_tracking_bin_2) {
893 thread->thread_timer_wakeups_bin_2++;
894 }
895 }
896
897 ledger_credit_thread(thread, thread->t_ledger,
898 task_ledgers.interrupt_wakeups, 1);
899 if (pidle) {
900 ledger_credit_thread(thread, thread->t_ledger,
901 task_ledgers.platform_idle_wakeups, 1);
902 }
903 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
904 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
905 if (cthread->callout_woken_from_icontext) {
906 ledger_credit_thread(thread, thread->t_ledger,
907 task_ledgers.interrupt_wakeups, 1);
908 thread->thread_callout_interrupt_wakeups++;
909
910 if (cthread->callout_woken_from_platform_idle) {
911 ledger_credit_thread(thread, thread->t_ledger,
912 task_ledgers.platform_idle_wakeups, 1);
913 thread->thread_callout_platform_idle_wakeups++;
914 }
915
916 cthread->callout_woke_thread = TRUE;
917 }
918 }
919
920 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
921 thread->callout_woken_from_icontext = !!aticontext;
922 thread->callout_woken_from_platform_idle = !!pidle;
923 thread->callout_woke_thread = FALSE;
924 }
925
926 #if KPERF
927 if (ready_for_runq) {
928 kperf_make_runnable(thread, aticontext);
929 }
930 #endif /* KPERF */
931
932 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
933 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
934 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
935 sched_run_buckets[TH_BUCKET_RUN], 0);
936
937 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, current_proc());
938
939 return ready_for_runq;
940 }
941
942 /*
943 * Routine: thread_allowed_for_handoff
944 * Purpose:
945 * Check if the thread is allowed for handoff operation
946 * Conditions:
947 * thread lock held, IPC locks may be held.
948 * TODO: In future, do not allow handoff if threads have different cluster
949 * recommendations.
950 */
951 boolean_t
952 thread_allowed_for_handoff(
953 thread_t thread)
954 {
955 thread_t self = current_thread();
956
957 if (allow_direct_handoff &&
958 thread->sched_mode == TH_MODE_REALTIME &&
959 self->sched_mode == TH_MODE_REALTIME) {
960 return TRUE;
961 }
962
963 return FALSE;
964 }
965
966 /*
967 * Routine: thread_go
968 * Purpose:
969 * Unblock and dispatch thread.
970 * Conditions:
971 * thread lock held, IPC locks may be held.
972 * thread must have been waiting
973 */
974 void
975 thread_go(
976 thread_t thread,
977 wait_result_t wresult,
978 bool try_handoff)
979 {
980 thread_t self = current_thread();
981
982 assert_thread_magic(thread);
983
984 assert(thread->at_safe_point == FALSE);
985 assert(thread->wait_event == NO_EVENT64);
986 assert(waitq_is_null(thread->waitq));
987
988 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
989 assert(thread->state & TH_WAIT);
990
991 if (thread->started) {
992 assert(thread->state & TH_WAKING);
993 }
994
995 thread_lock_assert(thread, LCK_ASSERT_OWNED);
996
997 assert(ml_get_interrupts_enabled() == false);
998
999 if (thread_unblock(thread, wresult)) {
1000 #if SCHED_TRACE_THREAD_WAKEUPS
1001 backtrace(&thread->thread_wakeup_bt[0],
1002 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL,
1003 NULL);
1004 #endif /* SCHED_TRACE_THREAD_WAKEUPS */
1005 if (try_handoff && thread_allowed_for_handoff(thread)) {
1006 thread_reference(thread);
1007 assert(self->handoff_thread == NULL);
1008 self->handoff_thread = thread;
1009
1010 /*
1011 * A TH_RUN'ed thread must have a chosen_processor.
1012 * thread_setrun would have set it, so we need to
1013 * replicate that here.
1014 */
1015 thread->chosen_processor = current_processor();
1016 } else {
1017 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1018 }
1019 }
1020 }
1021
1022 /*
1023 * Routine: thread_mark_wait_locked
1024 * Purpose:
1025 * Mark a thread as waiting. If, given the circumstances,
1026 * it doesn't want to wait (i.e. already aborted), then
1027 * indicate that in the return value.
1028 * Conditions:
1029 * at splsched() and thread is locked.
1030 */
1031 __private_extern__
1032 wait_result_t
1033 thread_mark_wait_locked(
1034 thread_t thread,
1035 wait_interrupt_t interruptible_orig)
1036 {
1037 boolean_t at_safe_point;
1038 wait_interrupt_t interruptible = interruptible_orig;
1039
1040 if (thread->state & TH_IDLE) {
1041 panic("Invalid attempt to wait while running the idle thread");
1042 }
1043
1044 assert(!(thread->state & (TH_WAIT | TH_WAKING | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
1045
1046 /*
1047 * The thread may have certain types of interrupts/aborts masked
1048 * off. Even if the wait location says these types of interrupts
1049 * are OK, we have to honor mask settings (outer-scoped code may
1050 * not be able to handle aborts at the moment).
1051 */
1052 interruptible &= TH_OPT_INTMASK;
1053 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
1054 interruptible = thread->options & TH_OPT_INTMASK;
1055 }
1056
1057 at_safe_point = (interruptible == THREAD_ABORTSAFE);
1058
1059 if (interruptible == THREAD_UNINT ||
1060 !(thread->sched_flags & TH_SFLAG_ABORT) ||
1061 (!at_safe_point &&
1062 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
1063 if (!(thread->state & TH_TERMINATE)) {
1064 DTRACE_SCHED(sleep);
1065 }
1066
1067 int state_bits = TH_WAIT;
1068 if (!interruptible) {
1069 state_bits |= TH_UNINT;
1070 }
1071 if (thread->sched_call) {
1072 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
1073 if (is_kerneltask(get_threadtask(thread))) {
1074 mask = THREAD_WAIT_NOREPORT_KERNEL;
1075 }
1076 if ((interruptible_orig & mask) == 0) {
1077 state_bits |= TH_WAIT_REPORT;
1078 }
1079 }
1080 thread->state |= state_bits;
1081 thread->at_safe_point = at_safe_point;
1082
1083 /* TODO: pass this through assert_wait instead, have
1084 * assert_wait just take a struct as an argument */
1085 assert(!thread->block_hint);
1086 thread->block_hint = thread->pending_block_hint;
1087 thread->pending_block_hint = kThreadWaitNone;
1088
1089 return thread->wait_result = THREAD_WAITING;
1090 } else {
1091 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
1092 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1093 }
1094 }
1095 thread->pending_block_hint = kThreadWaitNone;
1096
1097 return thread->wait_result = THREAD_INTERRUPTED;
1098 }
1099
1100 /*
1101 * Routine: thread_interrupt_level
1102 * Purpose:
1103 * Set the maximum interruptible state for the
1104 * current thread. The effective value of any
1105 * interruptible flag passed into assert_wait
1106 * will never exceed this.
1107 *
1108 * Useful for code that must not be interrupted,
1109 * but which calls code that doesn't know that.
1110 * Returns:
1111 * The old interrupt level for the thread.
1112 */
1113 __private_extern__
1114 wait_interrupt_t
1115 thread_interrupt_level(
1116 wait_interrupt_t new_level)
1117 {
1118 thread_t thread = current_thread();
1119 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1120
1121 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1122
1123 return result;
1124 }
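/*
 * Typical usage (a sketch, not a specific call site): raise the level around
 * a region that must not be aborted, then restore the previous level.
 *
 *	wait_interrupt_t saved = thread_interrupt_level(THREAD_UNINT);
 *	... code that may call assert_wait(..., THREAD_ABORTSAFE) but
 *	    must not actually be interrupted ...
 *	thread_interrupt_level(saved);
 */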
1125
1126 /*
1127 * assert_wait:
1128 *
1129 * Assert that the current thread is about to go to
1130 * sleep until the specified event occurs.
1131 */
1132 wait_result_t
1133 assert_wait(
1134 event_t event,
1135 wait_interrupt_t interruptible)
1136 {
1137 if (__improbable(event == NO_EVENT)) {
1138 panic("%s() called with NO_EVENT", __func__);
1139 }
1140
1141 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1142 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1143 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
1144
1145 struct waitq *waitq;
1146 waitq = global_eventq(event);
1147 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
1148 }
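/*
 * Typical usage (a sketch of the common pattern, not a specific call site):
 * assert the wait, re-check the wakeup condition, then either cancel the
 * wait or block; the other side calls thread_wakeup() on the same event.
 *
 *	assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	if (object->flag) {
 *		clear_wait(current_thread(), THREAD_AWAKENED);
 *	} else {
 *		thread_block(THREAD_CONTINUE_NULL);
 *	}
 */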
1149
1150 /*
1151 * assert_wait_queue:
1152 *
1153 * Return the global waitq for the specified event
1154 */
1155 struct waitq *
1156 assert_wait_queue(
1157 event_t event)
1158 {
1159 return global_eventq(event);
1160 }
1161
1162 wait_result_t
1163 assert_wait_timeout(
1164 event_t event,
1165 wait_interrupt_t interruptible,
1166 uint32_t interval,
1167 uint32_t scale_factor)
1168 {
1169 thread_t thread = current_thread();
1170 wait_result_t wresult;
1171 uint64_t deadline;
1172 spl_t s;
1173
1174 if (__improbable(event == NO_EVENT)) {
1175 panic("%s() called with NO_EVENT", __func__);
1176 }
1177
1178 struct waitq *waitq;
1179 waitq = global_eventq(event);
1180
1181 s = splsched();
1182 waitq_lock(waitq);
1183
1184 clock_interval_to_deadline(interval, scale_factor, &deadline);
1185
1186 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1187 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1188 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1189
1190 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1191 interruptible,
1192 TIMEOUT_URGENCY_SYS_NORMAL,
1193 deadline, TIMEOUT_NO_LEEWAY,
1194 thread);
1195
1196 waitq_unlock(waitq);
1197 splx(s);
1198 return wresult;
1199 }
1200
1201 wait_result_t
1202 assert_wait_timeout_with_leeway(
1203 event_t event,
1204 wait_interrupt_t interruptible,
1205 wait_timeout_urgency_t urgency,
1206 uint32_t interval,
1207 uint32_t leeway,
1208 uint32_t scale_factor)
1209 {
1210 thread_t thread = current_thread();
1211 wait_result_t wresult;
1212 uint64_t deadline;
1213 uint64_t abstime;
1214 uint64_t slop;
1215 uint64_t now;
1216 spl_t s;
1217
1218 if (__improbable(event == NO_EVENT)) {
1219 panic("%s() called with NO_EVENT", __func__);
1220 }
1221
1222 now = mach_absolute_time();
1223 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1224 deadline = now + abstime;
1225
1226 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1227
1228 struct waitq *waitq;
1229 waitq = global_eventq(event);
1230
1231 s = splsched();
1232 waitq_lock(waitq);
1233
1234 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1235 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1236 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1237
1238 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1239 interruptible,
1240 urgency, deadline, slop,
1241 thread);
1242
1243 waitq_unlock(waitq);
1244 splx(s);
1245 return wresult;
1246 }
1247
1248 wait_result_t
1249 assert_wait_deadline(
1250 event_t event,
1251 wait_interrupt_t interruptible,
1252 uint64_t deadline)
1253 {
1254 thread_t thread = current_thread();
1255 wait_result_t wresult;
1256 spl_t s;
1257
1258 if (__improbable(event == NO_EVENT)) {
1259 panic("%s() called with NO_EVENT", __func__);
1260 }
1261
1262 struct waitq *waitq;
1263 waitq = global_eventq(event);
1264
1265 s = splsched();
1266 waitq_lock(waitq);
1267
1268 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1269 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1270 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1271
1272 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1273 interruptible,
1274 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1275 TIMEOUT_NO_LEEWAY, thread);
1276 waitq_unlock(waitq);
1277 splx(s);
1278 return wresult;
1279 }
1280
1281 wait_result_t
1282 assert_wait_deadline_with_leeway(
1283 event_t event,
1284 wait_interrupt_t interruptible,
1285 wait_timeout_urgency_t urgency,
1286 uint64_t deadline,
1287 uint64_t leeway)
1288 {
1289 thread_t thread = current_thread();
1290 wait_result_t wresult;
1291 spl_t s;
1292
1293 if (__improbable(event == NO_EVENT)) {
1294 panic("%s() called with NO_EVENT", __func__);
1295 }
1296
1297 struct waitq *waitq;
1298 waitq = global_eventq(event);
1299
1300 s = splsched();
1301 waitq_lock(waitq);
1302
1303 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1304 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1305 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1306
1307 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1308 interruptible,
1309 urgency, deadline, leeway,
1310 thread);
1311 waitq_unlock(waitq);
1312 splx(s);
1313 return wresult;
1314 }
1315
1316 void
1317 sched_cond_init(
1318 sched_cond_atomic_t *cond)
1319 {
1320 os_atomic_init(cond, SCHED_COND_INIT);
1321 }
1322
1323 wait_result_t
1324 sched_cond_wait_parameter(
1325 sched_cond_atomic_t *cond,
1326 wait_interrupt_t interruptible,
1327 thread_continue_t continuation,
1328 void *parameter)
1329 {
1330 assert_wait((event_t) cond, interruptible);
1331 /* clear active bit to indicate future wakeups will have to unblock this thread */
1332 sched_cond_t new_state = (sched_cond_t) os_atomic_andnot(cond, SCHED_COND_ACTIVE, relaxed);
1333 if (__improbable(new_state & SCHED_COND_WAKEUP)) {
1334 /* a wakeup has been issued; undo wait assertion, ack the wakeup, and return */
1335 thread_t thread = current_thread();
1336 clear_wait(thread, THREAD_AWAKENED);
1337 sched_cond_ack(cond);
1338 return THREAD_AWAKENED;
1339 }
1340 return thread_block_parameter(continuation, parameter);
1341 }
1342
1343 wait_result_t
1344 sched_cond_wait(
1345 sched_cond_atomic_t *cond,
1346 wait_interrupt_t interruptible,
1347 thread_continue_t continuation)
1348 {
1349 return sched_cond_wait_parameter(cond, interruptible, continuation, NULL);
1350 }
1351
1352 sched_cond_t
1353 sched_cond_ack(
1354 sched_cond_atomic_t *cond)
1355 {
1356 sched_cond_t new_cond = (sched_cond_t) os_atomic_xor(cond, SCHED_COND_ACTIVE | SCHED_COND_WAKEUP, acquire);
1357 assert(new_cond & SCHED_COND_ACTIVE);
1358 return new_cond;
1359 }
1360
1361 kern_return_t
1362 sched_cond_signal(
1363 sched_cond_atomic_t *cond,
1364 thread_t thread)
1365 {
1366 disable_preemption();
1367 sched_cond_t old_cond = (sched_cond_t) os_atomic_or_orig(cond, SCHED_COND_WAKEUP, release);
1368 if (!(old_cond & (SCHED_COND_WAKEUP | SCHED_COND_ACTIVE))) {
1369 /* this was the first wakeup to be issued AND the thread was inactive */
1370 thread_wakeup_thread((event_t) cond, thread);
1371 }
1372 enable_preemption();
1373 return KERN_SUCCESS;
1374 }
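/*
 * Summary of the two-bit state machine implemented above (illustrative):
 *
 *	SCHED_COND_ACTIVE set:   the waiter is active; sched_cond_signal() only
 *	                         records SCHED_COND_WAKEUP.
 *	SCHED_COND_ACTIVE clear: the waiter has entered sched_cond_wait(); the
 *	                         first sched_cond_signal() also issues
 *	                         thread_wakeup_thread().
 *	SCHED_COND_WAKEUP set:   a wakeup is pending; sched_cond_wait() notices
 *	                         it, cancels its wait assertion, acks, and
 *	                         returns THREAD_AWAKENED without blocking.
 *
 * sched_cond_ack() atomically sets ACTIVE and clears WAKEUP, i.e. the waiter
 * consumes the pending wakeup and becomes active again.
 */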
1375
1376 /*
1377 * thread_isoncpu:
1378 *
1379 * Return TRUE if a thread is running on a processor such that an AST
1380 * is needed to pull it out of userspace execution, or if executing in
1381 * the kernel, bring to a context switch boundary that would cause
1382 * thread state to be serialized in the thread PCB.
1383 *
1384 * Thread locked, returns the same way. While locked, fields
1385 * like "state" cannot change. "runq" can change only from set to unset.
1386 */
1387 static inline boolean_t
1388 thread_isoncpu(thread_t thread)
1389 {
1390 /* Not running or runnable */
1391 if (!(thread->state & TH_RUN)) {
1392 return FALSE;
1393 }
1394
1395 /* Waiting on a runqueue, not currently running */
1396 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1397 /* TODO: This can also be incorrect for `handoff` cases where
1398 * the thread is never enqueued on the runq */
1399 if (thread_get_runq(thread) != PROCESSOR_NULL) {
1400 return FALSE;
1401 }
1402
1403 /*
1404 * Thread does not have a stack yet
1405 * It could be on the stack alloc queue or preparing to be invoked
1406 */
1407 if (!thread->kernel_stack) {
1408 return FALSE;
1409 }
1410
1411 /*
1412 * Thread must be running on a processor, or
1413 * about to run, or just did run. In all these
1414 * cases, an AST to the processor is needed
1415 * to guarantee that the thread is kicked out
1416 * of userspace and the processor has
1417 * context switched (and saved register state).
1418 */
1419 return TRUE;
1420 }
1421
1422 /*
1423 * thread_stop:
1424 *
1425 * Force a preemption point for a thread and wait
1426 * for it to stop running on a CPU. If a stronger
1427 * guarantee is requested, wait until no longer
1428 * runnable. Arbitrates access among
1429 * multiple stop requests. (released by unstop)
1430 *
1431 * The thread must enter a wait state and stop via a
1432 * separate means.
1433 *
1434 * Returns FALSE if interrupted.
1435 */
1436 boolean_t
1437 thread_stop(
1438 thread_t thread,
1439 boolean_t until_not_runnable)
1440 {
1441 wait_result_t wresult;
1442 spl_t s = splsched();
1443 boolean_t oncpu;
1444
1445 wake_lock(thread);
1446 thread_lock(thread);
1447
1448 while (thread->state & TH_SUSP) {
1449 thread->wake_active = TRUE;
1450 thread_unlock(thread);
1451
1452 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1453 wake_unlock(thread);
1454 splx(s);
1455
1456 if (wresult == THREAD_WAITING) {
1457 wresult = thread_block(THREAD_CONTINUE_NULL);
1458 }
1459
1460 if (wresult != THREAD_AWAKENED) {
1461 return FALSE;
1462 }
1463
1464 s = splsched();
1465 wake_lock(thread);
1466 thread_lock(thread);
1467 }
1468
1469 thread->state |= TH_SUSP;
1470
1471 while ((oncpu = thread_isoncpu(thread)) ||
1472 (until_not_runnable && (thread->state & TH_RUN))) {
1473 if (oncpu) {
1474 /*
1475 * TODO: chosen_processor isn't really the right
1476 * thing to IPI here. We really want `last_processor`,
1477 * but we also want to know where to send the IPI
1478 * *before* thread_invoke sets last_processor.
1479 *
1480 * rdar://47149497 (thread_stop doesn't IPI the right core)
1481 */
1482 assert(thread->state & TH_RUN);
1483 processor_t processor = thread->chosen_processor;
1484 assert(processor != PROCESSOR_NULL);
1485 cause_ast_check(processor);
1486 }
1487
1488 thread->wake_active = TRUE;
1489 thread_unlock(thread);
1490
1491 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1492 wake_unlock(thread);
1493 splx(s);
1494
1495 if (wresult == THREAD_WAITING) {
1496 wresult = thread_block(THREAD_CONTINUE_NULL);
1497 }
1498
1499 if (wresult != THREAD_AWAKENED) {
1500 thread_unstop(thread);
1501 return FALSE;
1502 }
1503
1504 s = splsched();
1505 wake_lock(thread);
1506 thread_lock(thread);
1507 }
1508
1509 thread_unlock(thread);
1510 wake_unlock(thread);
1511 splx(s);
1512
1513 /*
1514 * We return with the thread unlocked. To prevent it from
1515 * transitioning to a runnable state (or from TH_RUN to
1516 * being on the CPU), the caller must ensure the thread
1517 * is stopped via an external means (such as an AST)
1518 */
1519
1520 return TRUE;
1521 }
1522
1523 /*
1524 * thread_unstop:
1525 *
1526 * Release a previous stop request and set
1527 * the thread running if appropriate.
1528 *
1529 * Use only after a successful stop operation.
1530 */
1531 void
1532 thread_unstop(
1533 thread_t thread)
1534 {
1535 spl_t s = splsched();
1536
1537 wake_lock(thread);
1538 thread_lock(thread);
1539
1540 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1541
1542 if (thread->state & TH_SUSP) {
1543 thread->state &= ~TH_SUSP;
1544
1545 if (thread->wake_active) {
1546 thread->wake_active = FALSE;
1547 thread_unlock(thread);
1548
1549 thread_wakeup(&thread->wake_active);
1550 wake_unlock(thread);
1551 splx(s);
1552
1553 return;
1554 }
1555 }
1556
1557 thread_unlock(thread);
1558 wake_unlock(thread);
1559 splx(s);
1560 }
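/*
 * Typical pairing (a sketch, not a specific call site): stop the thread,
 * inspect or manipulate state that requires it to be off-CPU, then release
 * the stop request.
 *
 *	if (thread_stop(thread, FALSE)) {
 *		... thread is not running on any CPU here ...
 *		thread_unstop(thread);
 *	}
 */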
1561
1562 /*
1563 * thread_wait:
1564 *
1565 * Wait for a thread to stop running. (non-interruptible)
1566 *
1567 */
1568 void
1569 thread_wait(
1570 thread_t thread,
1571 boolean_t until_not_runnable)
1572 {
1573 wait_result_t wresult;
1574 boolean_t oncpu;
1575 processor_t processor;
1576 spl_t s = splsched();
1577
1578 wake_lock(thread);
1579 thread_lock(thread);
1580
1581 /*
1582 * Wait until not running on a CPU. If stronger requirement
1583 * desired, wait until not runnable. Assumption: if thread is
1584 * on CPU, then TH_RUN is set, so we're not waiting in any case
1585 * where the original, pure "TH_RUN" check would have let us
1586 * finish.
1587 */
1588 while ((oncpu = thread_isoncpu(thread)) ||
1589 (until_not_runnable && (thread->state & TH_RUN))) {
1590 if (oncpu) {
1591 assert(thread->state & TH_RUN);
1592 processor = thread->chosen_processor;
1593 cause_ast_check(processor);
1594 }
1595
1596 thread->wake_active = TRUE;
1597 thread_unlock(thread);
1598
1599 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1600 wake_unlock(thread);
1601 splx(s);
1602
1603 if (wresult == THREAD_WAITING) {
1604 thread_block(THREAD_CONTINUE_NULL);
1605 }
1606
1607 s = splsched();
1608 wake_lock(thread);
1609 thread_lock(thread);
1610 }
1611
1612 thread_unlock(thread);
1613 wake_unlock(thread);
1614 splx(s);
1615 }
1616
1617 /*
1618 * Routine: clear_wait_internal
1619 *
1620 * Clear the wait condition for the specified thread.
1621 * Start the thread executing if that is appropriate.
1622 * Arguments:
1623 * thread thread to awaken
1624 * result Wakeup result the thread should see
1625 * Conditions:
1626 * At splsched
1627 * the thread is locked.
1628 * Returns:
1629 * KERN_SUCCESS thread was rousted out a wait
1630 * KERN_FAILURE thread was waiting but could not be rousted
1631 * KERN_NOT_WAITING thread was not waiting
1632 */
1633 __private_extern__ kern_return_t
1634 clear_wait_internal(
1635 thread_t thread,
1636 wait_result_t wresult)
1637 {
1638 waitq_t waitq = thread->waitq;
1639
1640 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1641 return KERN_FAILURE;
1642 }
1643
1644 /*
1645 * Check that the thread is waiting and not waking: a waking thread
1646 * has already cleared its waitq and is destined to be go'ed, so we
1647 * don't need to do it again.
1648 */
1649 if ((thread->state & (TH_WAIT | TH_TERMINATE | TH_WAKING)) != TH_WAIT) {
1650 assert(waitq_is_null(thread->waitq));
1651 return KERN_NOT_WAITING;
1652 }
1653
1654 /* may drop and retake the thread lock */
1655 if (!waitq_is_null(waitq) && !waitq_pull_thread_locked(waitq, thread)) {
1656 return KERN_NOT_WAITING;
1657 }
1658
1659 thread_go(thread, wresult, /* handoff */ false);
1660
1661 return KERN_SUCCESS;
1662 }
1663
1664
1665 /*
1666 * clear_wait:
1667 *
1668 * Clear the wait condition for the specified thread. Start the thread
1669 * executing if that is appropriate.
1670 *
1671 * parameters:
1672 * thread thread to awaken
1673 * result Wakeup result the thread should see
1674 */
1675 __mockable kern_return_t
1676 clear_wait(
1677 thread_t thread,
1678 wait_result_t result)
1679 {
1680 kern_return_t ret;
1681 spl_t s;
1682
1683 s = splsched();
1684 thread_lock(thread);
1685
1686 ret = clear_wait_internal(thread, result);
1687
1688 if (thread == current_thread()) {
1689 /*
1690 * The thread must be ready to wait again immediately
1691 * after clearing its own wait.
1692 */
1693 assert((thread->state & TH_WAKING) == 0);
1694 }
1695
1696 thread_unlock(thread);
1697 splx(s);
1698 return ret;
1699 }
1700
1701 /*
1702 * thread_wakeup_prim:
1703 *
1704 * Common routine for thread_wakeup, thread_wakeup_with_result,
1705 * and thread_wakeup_one.
1706 *
1707 */
1708 kern_return_t
1709 thread_wakeup_nthreads_prim(
1710 event_t event,
1711 uint32_t nthreads,
1712 wait_result_t result)
1713 {
1714 if (__improbable(event == NO_EVENT)) {
1715 panic("%s() called with NO_EVENT", __func__);
1716 }
1717
1718 struct waitq *wq = global_eventq(event);
1719 uint32_t count;
1720
1721 count = waitq_wakeup64_nthreads(wq, CAST_EVENT64_T(event), result,
1722 WAITQ_WAKEUP_DEFAULT, nthreads);
1723 return count ? KERN_SUCCESS : KERN_NOT_WAITING;
1724 }
1725
1726 /*
1727 * thread_wakeup_prim:
1728 *
1729 * Common routine for thread_wakeup, thread_wakeup_with_result,
1730 * and thread_wakeup_one.
1731 *
1732 */
1733 __mockable kern_return_t
1734 thread_wakeup_prim(
1735 event_t event,
1736 boolean_t one_thread,
1737 wait_result_t result)
1738 {
1739 if (one_thread) {
1740 return thread_wakeup_nthreads_prim(event, 1, result);
1741 } else {
1742 return thread_wakeup_nthreads_prim(event, UINT32_MAX, result);
1743 }
1744 }
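/*
 * For reference, thread_wakeup(), thread_wakeup_with_result() and
 * thread_wakeup_one() are thin wrapper macros around this primitive
 * (see sched_prim.h), roughly:
 *
 *	#define thread_wakeup(x)     thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
 *	#define thread_wakeup_one(x) thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)
 */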
1745
1746 /*
1747 * Wakeup a specified thread if and only if it's waiting for this event
1748 */
1749 kern_return_t
1750 thread_wakeup_thread(
1751 event_t event,
1752 thread_t thread)
1753 {
1754 if (__improbable(event == NO_EVENT)) {
1755 panic("%s() called with NO_EVENT", __func__);
1756 }
1757
1758 if (__improbable(thread == THREAD_NULL)) {
1759 panic("%s() called with THREAD_NULL", __func__);
1760 }
1761
1762 struct waitq *wq = global_eventq(event);
1763
1764 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1765 }
1766
1767 /*
1768 * thread_bind:
1769 *
1770 * Force the current thread to execute on the specified processor.
1771 * Takes effect after the next thread_block().
1772 *
1773 * Returns the previous binding. PROCESSOR_NULL means
1774 * not bound.
1775 *
1776 * XXX - DO NOT export this to users - XXX
1777 */
1778 processor_t
1779 thread_bind(
1780 processor_t processor)
1781 {
1782 thread_t self = current_thread();
1783 processor_t prev;
1784 spl_t s;
1785
1786 s = splsched();
1787 thread_lock(self);
1788
1789 prev = thread_bind_internal(self, processor);
1790
1791 thread_unlock(self);
1792 splx(s);
1793
1794 return prev;
1795 }
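/*
 * Typical usage (a sketch, not a specific call site): bind, block to migrate
 * onto the target processor, do the per-CPU work, then restore the previous
 * binding and block again to migrate away.
 *
 *	processor_t prev = thread_bind(processor);
 *	thread_block(THREAD_CONTINUE_NULL);	// now running on `processor`
 *	... per-CPU work ...
 *	thread_bind(prev);
 *	thread_block(THREAD_CONTINUE_NULL);
 */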
1796
1797 void
1798 thread_bind_during_wakeup(thread_t thread, processor_t processor)
1799 {
1800 assert(!ml_get_interrupts_enabled());
1801 assert((thread->state & (TH_WAIT | TH_WAKING)) == (TH_WAIT | TH_WAKING));
1802 #if MACH_ASSERT
1803 thread_lock_assert(thread, LCK_ASSERT_OWNED);
1804 #endif
1805
1806 if (thread->bound_processor != processor) {
1807 thread_bind_internal(thread, processor);
1808 }
1809 }
1810
1811 void
1812 thread_unbind_after_queue_shutdown(
1813 thread_t thread,
1814 processor_t processor __assert_only)
1815 {
1816 assert(!ml_get_interrupts_enabled());
1817
1818 thread_lock(thread);
1819
1820 if (thread->bound_processor) {
1821 bool removed;
1822
1823 assert(thread->bound_processor == processor);
1824
1825 removed = thread_run_queue_remove(thread);
1826 /*
1827 * we can always unbind even if we didn't really remove the
1828 * thread from the runqueue
1829 */
1830 thread_bind_internal(thread, PROCESSOR_NULL);
1831 if (removed) {
1832 thread_run_queue_reinsert(thread, SCHED_TAILQ);
1833 }
1834 }
1835
1836 thread_unlock(thread);
1837 }
1838
1839 /*
1840 * thread_bind_internal:
1841 *
1842 * If the specified thread is not the current thread, and it is currently
1843 * running on another CPU, a remote AST must be sent to that CPU to cause
1844 * the thread to migrate to its bound processor. Otherwise, the migration
1845 * will occur at the next quantum expiration or blocking point.
1846 *
1847 * When the thread is the current thread, an explicit thread_block() should
1848 * be used to force the current processor to context switch away and
1849 * let the thread migrate to the bound processor.
1850 *
1851 * Thread must be locked, and at splsched.
1852 */
1853
1854 static processor_t
1855 thread_bind_internal(
1856 thread_t thread,
1857 processor_t processor)
1858 {
1859 processor_t prev;
1860
1861 /* <rdar://problem/15102234> */
1862 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1863 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1864 thread_assert_runq_null(thread);
1865
1866 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND),
1867 thread_tid(thread), processor ? processor->cpu_id : ~0ul, 0, 0, 0);
1868
1869 prev = thread->bound_processor;
1870 thread->bound_processor = processor;
1871
1872 return prev;
1873 }
1874
1875 /*
1876 * thread_vm_bind_group_add:
1877 *
1878 * The "VM bind group" is a special mechanism to mark a collection
1879 * of threads from the VM subsystem that, in general, should be scheduled
1880 * with only one CPU of parallelism. To accomplish this, we initially
1881 * bind all the threads to the master processor, which has the effect
1882 * that only one of the threads in the group can execute at once, including
1883 * preempting threads in the group that are at a lower priority. Future
1884 * implementations may use more dynamic mechanisms to prevent the collection
1885 * of VM threads from using more CPU time than desired.
1886 *
1887 * The current implementation can result in priority inversions where
1888 * compute-bound priority 95 or realtime threads that happen to have
1889 * landed on the master processor prevent the VM threads from running.
1890 * When this situation is detected, we unbind the threads for one
1891 * scheduler tick to allow the scheduler to run the threads on
1892 * additional CPUs, before restoring the binding (assuming high latency
1893 * is no longer a problem).
1894 */
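
/*
 * Hedged sketch of how a VM thread opts into the group (the real callers
 * are the threads listed in the comment below; the function name here is
 * illustrative only):
 *
 *	static void
 *	vm_example_daemon(void)
 *	{
 *		thread_vm_bind_group_add();	// binds the calling thread
 *		for (;;) {
 *			// ... do VM work, block, repeat ...
 *		}
 *	}
 */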
1895
1896 /*
1897 * The current max is provisioned for:
1898 * vm_compressor_swap_trigger_thread (92)
1899 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1900 * vm_pageout_continue (92)
1901 * memorystatus_thread (95)
1902 */
1903 #define MAX_VM_BIND_GROUP_COUNT (5)
1904 decl_simple_lock_data(static, sched_vm_group_list_lock);
1905 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1906 static int sched_vm_group_thread_count;
1907 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1908
1909 void
1910 thread_vm_bind_group_add(void)
1911 {
1912 thread_t self = current_thread();
1913
1914 if (support_bootcpu_shutdown) {
1915 /*
1916 * Bind group is not supported without an always-on
1917 * processor to bind to. If we need these to coexist,
1918 * we'd need to dynamically move the group to
1919 * another processor as it shuts down, or build
1920 * a different way to run a set of threads
1921 * without parallelism.
1922 */
1923 return;
1924 }
1925
1926 thread_reference(self);
1927 self->options |= TH_OPT_SCHED_VM_GROUP;
1928
1929 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1930 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1931 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1932 simple_unlock(&sched_vm_group_list_lock);
1933
1934 thread_bind(master_processor);
1935
1936 /* Switch to bound processor if not already there */
1937 thread_block(THREAD_CONTINUE_NULL);
1938 }
1939
1940 static void
1941 sched_vm_group_maintenance(void)
1942 {
1943 uint64_t ctime = mach_absolute_time();
1944 uint64_t longtime = ctime - sched_tick_interval;
1945 int i;
1946 spl_t s;
1947 boolean_t high_latency_observed = FALSE;
1948 boolean_t runnable_and_not_on_runq_observed = FALSE;
1949 boolean_t bind_target_changed = FALSE;
1950 processor_t bind_target = PROCESSOR_NULL;
1951
1952 /* Make sure nobody attempts to add new threads while we are enumerating them */
1953 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1954
1955 s = splsched();
1956
1957 for (i = 0; i < sched_vm_group_thread_count; i++) {
1958 thread_t thread = sched_vm_group_thread_list[i];
1959 assert(thread != THREAD_NULL);
1960 thread_lock(thread);
1961 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
1962 if (thread_get_runq(thread) != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1963 high_latency_observed = TRUE;
1964 } else if (thread_get_runq(thread) == PROCESSOR_NULL) {
1965 /* There are some cases where a thread may be transitioning that also fall into this case */
1966 runnable_and_not_on_runq_observed = TRUE;
1967 }
1968 }
1969 thread_unlock(thread);
1970
1971 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1972 /* All the things we are looking for are true, stop looking */
1973 break;
1974 }
1975 }
1976
1977 splx(s);
1978
1979 if (sched_vm_group_temporarily_unbound) {
1980 /* If we turned off binding, make sure everything is OK before rebinding */
1981 if (!high_latency_observed) {
1982 /* rebind */
1983 bind_target_changed = TRUE;
1984 bind_target = master_processor;
1985 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1986 }
1987 } else {
1988 /*
1989 * Check if we're in a bad state, which is defined by high
1990 * latency with no core currently executing a thread. If a
1991 * single thread is making progress on a CPU, that means the
1992 * binding concept to reduce parallelism is working as
1993 * designed.
1994 */
1995 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1996 /* unbind */
1997 bind_target_changed = TRUE;
1998 bind_target = PROCESSOR_NULL;
1999 sched_vm_group_temporarily_unbound = TRUE;
2000 }
2001 }
2002
2003 if (bind_target_changed) {
2004 s = splsched();
2005 for (i = 0; i < sched_vm_group_thread_count; i++) {
2006 thread_t thread = sched_vm_group_thread_list[i];
2007 boolean_t removed;
2008 assert(thread != THREAD_NULL);
2009
2010 thread_lock(thread);
2011 removed = thread_run_queue_remove(thread);
2012 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
2013 thread_bind_internal(thread, bind_target);
2014 } else {
2015 /*
2016 * Thread was in the middle of being context-switched-to,
2017 * or was in the process of blocking. To avoid switching the bind
2018 * state out mid-flight, defer the change if possible.
2019 */
2020 if (bind_target == PROCESSOR_NULL) {
2021 thread_bind_internal(thread, bind_target);
2022 } else {
2023 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
2024 }
2025 }
2026
2027 if (removed) {
2028 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
2029 }
2030 thread_unlock(thread);
2031 }
2032 splx(s);
2033 }
2034
2035 simple_unlock(&sched_vm_group_list_lock);
2036 }
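
/*
 * Summary of the hysteresis above: while bound, the group is temporarily
 * unbound when high dispatch latency is observed and no group thread is
 * making progress on a CPU; while temporarily unbound, it is re-bound to
 * the master processor once the high latency clears.  If a thread cannot
 * be re-bound because it is mid-transition, sched_vm_group_temporarily_unbound
 * stays set so the next maintenance pass retries.
 */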
2037
2038 #if defined(__x86_64__)
2039 #define SCHED_AVOID_CPU0 1
2040 #else
2041 #define SCHED_AVOID_CPU0 0
2042 #endif
2043
2044 int sched_avoid_cpu0 = SCHED_AVOID_CPU0;
2045 int sched_backup_cpu_timeout_count = 5; /* The maximum number of 10us delays to wait before using a backup cpu */
2046 int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
2047
2048 int
2049 sched_get_rt_n_backup_processors(void)
2050 {
2051 return sched_rt_n_backup_processors;
2052 }
2053
2054 void
2055 sched_set_rt_n_backup_processors(int n)
2056 {
2057 if (n < 0) {
2058 n = 0;
2059 } else if (n > SCHED_MAX_BACKUP_PROCESSORS) {
2060 n = SCHED_MAX_BACKUP_PROCESSORS;
2061 }
2062
2063 sched_rt_n_backup_processors = n;
2064 }
2065
2066 /*
2067 * Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
2068 * rebalancing opportunity exists when a core is (instantaneously) idle, but
2069 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
2070 * IPI thrash if this core does not remain idle following the load balancing ASTs
2071 * Idle "thrash", when IPI issue is followed by idle entry/core power down
2072 * followed by a wakeup shortly thereafter.
2073 */
2074
2075 #if (DEVELOPMENT || DEBUG)
2076 int sched_smt_balance = 1;
2077 #endif
2078
2079 #if CONFIG_SCHED_SMT
2080 /* Invoked with pset locked, returns with pset unlocked */
2081 bool
2082 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
2083 {
2084 processor_t ast_processor = NULL;
2085
2086 #if (DEVELOPMENT || DEBUG)
2087 if (__improbable(sched_smt_balance == 0)) {
2088 goto smt_balance_exit;
2089 }
2090 #endif
2091
2092 assert(cprocessor == current_processor());
2093 if (cprocessor->is_SMT == FALSE) {
2094 goto smt_balance_exit;
2095 }
2096
2097 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
2098
2099 /* Determine if both this processor and its sibling are idle,
2100 * indicating an SMT rebalancing opportunity.
2101 */
2102 if (sib_processor->state != PROCESSOR_IDLE) {
2103 goto smt_balance_exit;
2104 }
2105
2106 processor_t sprocessor;
2107
2108 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2109 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
2110 ~cpset->primary_map);
2111 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
2112 sprocessor = processor_array[cpuid];
2113 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
2114 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
2115 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2116 if (ipi_type != SCHED_IPI_NONE) {
2117 assert(sprocessor != cprocessor);
2118 ast_processor = sprocessor;
2119 break;
2120 }
2121 }
2122 }
2123
2124 smt_balance_exit:
2125 pset_unlock(cpset);
2126
2127 if (ast_processor) {
2128 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
2129 sched_ipi_perform(ast_processor, ipi_type);
2130 }
2131 return false;
2132 }
2133 #else /* CONFIG_SCHED_SMT */
2134 /* Invoked with pset locked, returns with pset unlocked */
2135 bool
2136 sched_SMT_balance(__unused processor_t cprocessor, __unused processor_set_t cpset)
2137 {
2138 pset_unlock(cpset);
2139 return false;
2140 }
2141 #endif /* CONFIG_SCHED_SMT */
2142
2143 int
2144 pset_available_cpu_count(processor_set_t pset)
2145 {
2146 return bit_count(pset_available_cpumap(pset));
2147 }
2148
2149 bool
2150 pset_is_recommended(processor_set_t pset)
2151 {
2152 if (!pset) {
2153 return false;
2154 }
2155 return pset_available_cpu_count(pset) > 0;
2156 }
2157
2158 bool
2159 pset_type_is_recommended(processor_set_t pset)
2160 {
2161 if (!pset) {
2162 return false;
2163 }
2164 pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
2165 return bit_count(recommended_psets) > 0;
2166 }
2167
2168 static cpumap_t
2169 pset_available_but_not_running_cpumap(processor_set_t pset)
2170 {
2171 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
2172 pset->recommended_bitmask;
2173 }
2174
2175 bool
2176 pset_has_stealable_threads(processor_set_t pset)
2177 {
2178 pset_assert_locked(pset);
2179
2180 cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
2181 #if CONFIG_SCHED_SMT
2182 /*
2183 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
2184 * available primary CPUs
2185 */
2186 avail_map &= pset->primary_map;
2187 #endif /* CONFIG_SCHED_SMT */
2188
2189 return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
2190 }
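
/*
 * Worked example of the check above (with CONFIG_SCHED_SMT, only primary
 * CPUs count): with 2 available primary CPUs, 2 queued timeshare threads
 * and 1 queued RT thread, 2 + 1 > 2, so the pset reports stealable
 * threads; with only 2 queued threads in total it would not.
 */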
2191
2192 static void
2193 clear_pending_AST_bits(processor_set_t pset, processor_t processor, __kdebug_only const int trace_point_number)
2194 {
2195 /* Acknowledge any pending IPIs here with pset lock held */
2196 pset_assert_locked(pset);
2197 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2198 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END,
2199 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, trace_point_number);
2200 }
2201 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2202
2203 #if defined(CONFIG_SCHED_DEFERRED_AST)
2204 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
2205 #endif
2206 }
2207
2208 /*
2209 * Called with pset locked, on a processor that is committing to run a new thread
2210 * Will transition an idle or dispatching processor to running as it picks up
2211 * the first new thread from the idle thread.
2212 */
2213 static void
2214 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
2215 {
2216 pset_assert_locked(pset);
2217
2218 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2219 assert(current_thread() == processor->idle_thread);
2220
2221 /*
2222 * Dispatching processor is now committed to running new_thread,
2223 * so change its state to PROCESSOR_RUNNING.
2224 */
2225 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
2226 } else {
2227 assert(processor->state == PROCESSOR_RUNNING);
2228 }
2229
2230 processor_state_update_from_thread(processor, new_thread, true);
2231
2232 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2233 bit_set(pset->realtime_map, processor->cpu_id);
2234 } else {
2235 bit_clear(pset->realtime_map, processor->cpu_id);
2236 }
2237 pset_update_rt_stealable_state(pset);
2238
2239 pset_node_t node = pset->node;
2240
2241 if (bit_count(node->pset_map) == 1) {
2242 /* Node has only a single pset, so skip node pset map updates */
2243 return;
2244 }
2245
2246 cpumap_t avail_map = pset_available_cpumap(pset);
2247
2248 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2249 if ((avail_map & pset->realtime_map) == avail_map) {
2250 /* No more non-RT CPUs in this pset */
2251 atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2252 }
2253 #if CONFIG_SCHED_SMT
2254 avail_map &= pset->primary_map;
2255 if ((avail_map & pset->realtime_map) == avail_map) {
2256 /* No more non-RT primary CPUs in this pset */
2257 atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2258 }
2259 #endif /* CONFIG_SCHED_SMT */
2260 } else {
2261 if ((avail_map & pset->realtime_map) != avail_map) {
2262 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
2263 atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2264 }
2265 }
2266 #if CONFIG_SCHED_SMT
2267 avail_map &= pset->primary_map;
2268 if ((avail_map & pset->realtime_map) != avail_map) {
2269 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
2270 atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2271 }
2272 }
2273 #endif /* CONFIG_SCHED_SMT */
2274 }
2275 }
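
/*
 * Note on the node map updates above: pset_non_rt_map (and, with SMT,
 * pset_non_rt_primary_map) tracks which psets still have at least one
 * available (primary) CPU that is not committed to a realtime thread,
 * presumably so realtime placement can quickly skip fully RT-saturated
 * psets on multi-pset nodes.
 */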
2276
2277 #if CONFIG_SCHED_SMT
2278 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups);
2279 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups);
2280 #else /* !CONFIG_SCHED_SMT */
2281 processor_t pset_choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool skip_spills);
2282 #endif /* !CONFIG_SCHED_SMT */
2283 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup);
2284
2285 static bool
2286 other_psets_have_earlier_rt_threads_pending(processor_set_t stealing_pset, uint64_t earliest_deadline)
2287 {
2288 pset_map_t pset_map = stealing_pset->node->pset_map;
2289
2290 bit_clear(pset_map, stealing_pset->pset_id);
2291
2292 for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
2293 processor_set_t nset = pset_array[pset_id];
2294
2295 if (rt_deadline_add(os_atomic_load(&nset->stealable_rt_threads_earliest_deadline, relaxed), rt_deadline_epsilon) < earliest_deadline) {
2296 return true;
2297 }
2298 }
2299
2300 return false;
2301 }
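
/*
 * The rt_deadline_epsilon slop above means another pset's stealable RT
 * work only counts as "earlier" when its earliest deadline beats ours by
 * more than the epsilon, so effectively-equal deadlines are not treated
 * as a reason to switch psets.
 */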
2302
2303 /*
2304 * backup processor - used by choose_processor to send a backup IPI to in case the preferred processor can't immediately respond
2305 * followup processor - used in thread_select when there are still threads on the run queue and available processors
2306 * spill processor - a processor in a different processor set that is signalled to steal a thread from this run queue
2307 */
2308 typedef enum {
2309 none,
2310 backup,
2311 followup,
2312 spill
2313 } next_processor_type_t;
2314
2315 #undef LOOP_COUNT
2316 #ifdef LOOP_COUNT
2317 int max_loop_count[MAX_SCHED_CPUS] = { 0 };
2318 #endif
2319
2320 /*
2321 * thread_select:
2322 *
2323 * Select a new thread for the current processor to execute.
2324 *
2325 * May select the current thread, which must be locked.
2326 */
2327 static thread_t
2328 thread_select(thread_t thread,
2329 processor_t processor,
2330 ast_t *reason)
2331 {
2332 processor_set_t pset = processor->processor_set;
2333 thread_t new_thread = THREAD_NULL;
2334
2335 assert(processor == current_processor());
2336 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2337
2338 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_START,
2339 0, pset->pending_AST_URGENT_cpu_mask, 0, 0);
2340
2341 __kdebug_only int idle_reason = 0;
2342 __kdebug_only int delay_count = 0;
2343
2344 #if CONFIG_SCHED_SMT
2345 int timeout_count = sched_backup_cpu_timeout_count;
2346 if ((sched_avoid_cpu0 == 1) && (processor->cpu_id == 0)) {
2347 /* Prefer cpu0 as backup */
2348 timeout_count--;
2349 } else if ((sched_avoid_cpu0 == 2) && (processor->processor_primary != processor)) {
2350 /* Prefer secondary cpu as backup */
2351 timeout_count--;
2352 }
2353 #endif /* CONFIG_SCHED_SMT */
2354 bool pending_AST_URGENT = false;
2355 bool pending_AST_PREEMPT = false;
2356
2357 #ifdef LOOP_COUNT
2358 int loop_count = -1;
2359 #endif
2360
2361 do {
2362 /*
2363 * Update the priority.
2364 */
2365 if (SCHED(can_update_priority)(thread)) {
2366 SCHED(update_priority)(thread);
2367 }
2368
2369 pset_lock(pset);
2370
2371 restart:
2372 #ifdef LOOP_COUNT
2373 loop_count++;
2374 if (loop_count > max_loop_count[processor->cpu_id]) {
2375 max_loop_count[processor->cpu_id] = loop_count;
2376 if (bit_count(loop_count) == 1) {
2377 kprintf("[%d]%s>max_loop_count = %d\n", processor->cpu_id, __FUNCTION__, loop_count);
2378 }
2379 }
2380 #endif
2381 pending_AST_URGENT = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
2382 pending_AST_PREEMPT = bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2383
2384 processor_state_update_from_thread(processor, thread, true);
2385
2386 idle_reason = 0;
2387
2388 processor_t ast_processor = PROCESSOR_NULL;
2389 processor_t next_rt_processor = PROCESSOR_NULL;
2390 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2391 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2392
2393 assert(processor->state != PROCESSOR_OFF_LINE);
2394
2395 /*
2396 * Bound threads are dispatched to a processor without going through
2397 * choose_processor(), so in those cases we must continue trying to dequeue work
2398 * as we are the only option.
2399 */
2400 if (!SCHED(processor_bound_count)(processor)) {
2401 if (!processor->is_recommended) {
2402 /*
2403 * The performance controller has provided a hint not to dispatch more threads.
2404 */
2405 idle_reason = 1;
2406 goto send_followup_ipi_before_idle;
2407 } else if (rt_runq_count(pset)) {
2408 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, false);
2409 /* Give the current RT thread a chance to complete */
2410 ok_to_run_realtime_thread |= (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice);
2411 #if CONFIG_SCHED_SMT
2412 /*
2413 * On Intel we want to avoid SMT secondary processors and processor 0
2414 * but allow them to be used as backup processors in case the preferred chosen
2415 * processor is delayed by interrupts or processor stalls. So if it is
2416 * not ok_to_run_realtime_thread as preferred (sched_ok_to_run_realtime_thread(pset, processor, as_backup=false))
2417 * but ok_to_run_realtime_thread as backup (sched_ok_to_run_realtime_thread(pset, processor, as_backup=true))
2418 * we delay up to (timeout_count * 10us) to give the preferred processor a chance
2419 * to grab the thread before the (current) backup processor does.
2420 *
2421 * timeout_count defaults to 5 but can be tuned using sysctl kern.sched_backup_cpu_timeout_count
2422 * on DEVELOPMENT || DEBUG kernels. It is also adjusted (see above) depending on whether we want to use
2423 * cpu0 before secondary cpus or not.
2424 */
2425 if (!ok_to_run_realtime_thread) {
2426 if (sched_ok_to_run_realtime_thread(pset, processor, true)) {
2427 if (timeout_count-- > 0) {
2428 pset_unlock(pset);
2429 thread_unlock(thread);
2430 delay(10);
2431 delay_count++;
2432 thread_lock(thread);
2433 pset_lock(pset);
2434 goto restart;
2435 }
2436 ok_to_run_realtime_thread = true;
2437 }
2438 }
2439 #endif /* CONFIG_SCHED_SMT */
2440 if (!ok_to_run_realtime_thread) {
2441 idle_reason = 2;
2442 goto send_followup_ipi_before_idle;
2443 }
2444 }
2445 #if CONFIG_SCHED_SMT
2446 else if (processor->processor_primary != processor) {
2447 /*
2448 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
2449 * we should look for work only under the same conditions that choose_processor()
2450 * would have assigned work, which is when all primary processors have been assigned work.
2451 */
2452 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
2453 /* There are idle primaries */
2454 idle_reason = 3;
2455 goto idle;
2456 }
2457 }
2458 #endif /* CONFIG_SCHED_SMT */
2459 }
2460
2461 /*
2462 * Test to see if the current thread should continue
2463 * to run on this processor. Must not be attempting to wait, and not
2464 * bound to a different processor, nor be in the wrong
2465 * processor set, nor be forced to context switch by TH_SUSP.
2466 *
2467 * Note that there are never any RT threads in the regular runqueue.
2468 *
2469 * This code is insanely tricky.
2470 */
2471
2472 /* i.e. not waiting, not TH_SUSP'ed */
2473 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
2474
2475 /*
2476 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2477 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2478 * <rdar://problem/47907700>
2479 *
2480 * A yielding thread shouldn't be forced to context switch.
2481 */
2482
2483 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
2484
2485 #if CONFIG_SCHED_SMT
2486 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2487 #endif /* CONFIG_SCHED_SMT */
2488
2489 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2490
2491 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2492
2493 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, *reason);
2494
2495 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, true);
2496
2497 bool current_thread_can_keep_running = (
2498 still_running
2499 #if CONFIG_SCHED_SMT
2500 && !needs_smt_rebalance
2501 #endif /* CONFIG_SCHED_SMT */
2502 && !affinity_mismatch
2503 && !bound_elsewhere
2504 && !avoid_processor);
2505 if (current_thread_can_keep_running) {
2506 /*
2507 * This thread is eligible to keep running on this processor.
2508 *
2509 * RT threads with un-expired quantum stay on processor,
2510 * unless there's a valid RT thread with an earlier deadline
2511 * and it is still ok_to_run_realtime_thread.
2512 */
2513 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2514 /*
2515 * Pick a new RT thread only if ok_to_run_realtime_thread
2516 * (but the current thread is allowed to complete).
2517 */
2518 if (ok_to_run_realtime_thread) {
2519 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2520 goto pick_new_rt_thread;
2521 }
2522 if (rt_runq_priority(pset) > thread->sched_pri) {
2523 if (sched_rt_runq_strict_priority) {
2524 /* The next RT thread is better, so pick it off the runqueue. */
2525 goto pick_new_rt_thread;
2526 }
2527
2528 /*
2529 * See if the current lower priority thread can continue to run without causing
2530 * the higher priority thread on the run queue to miss its deadline.
2531 */
2532 thread_t hi_thread = rt_runq_first(&pset->rt_runq);
2533 if (thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon >= hi_thread->realtime.constraint) {
2534 /* The next RT thread is better, so pick it off the runqueue. */
2535 goto pick_new_rt_thread;
2536 }
2537 } else if ((rt_runq_count(pset) > 0) && (rt_deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < thread->realtime.deadline)) {
2538 /* The next RT thread is better, so pick it off the runqueue. */
2539 goto pick_new_rt_thread;
2540 }
2541 if (other_psets_have_earlier_rt_threads_pending(pset, thread->realtime.deadline)) {
2542 goto pick_new_rt_thread;
2543 }
2544 }
2545
2546 /* This is still the best RT thread to run. */
2547 processor->deadline = thread->realtime.deadline;
2548
2549 sched_update_pset_load_average(pset, 0);
2550
2551 clear_pending_AST_bits(pset, processor, 1);
2552
2553 next_rt_processor = PROCESSOR_NULL;
2554 next_rt_ipi_type = SCHED_IPI_NONE;
2555
2556 bool pset_unlocked = false;
2557 next_processor_type_t nptype = none;
2558 #if CONFIG_SCHED_EDGE
2559 if (rt_pset_has_stealable_threads(pset)) {
2560 nptype = spill;
2561 pset_unlocked = rt_choose_next_processor_for_spill_IPI(pset, processor, &next_rt_processor, &next_rt_ipi_type);
2562 }
2563 #endif /* CONFIG_SCHED_EDGE */
2564 if (nptype == none && rt_pset_needs_a_followup_IPI(pset)) {
2565 nptype = followup;
2566 rt_choose_next_processor_for_followup_IPI(pset, processor, &next_rt_processor, &next_rt_ipi_type);
2567 }
2568 if (!pset_unlocked) {
2569 pset_unlock(pset);
2570 }
2571
2572 if (next_rt_processor) {
2573 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2574 next_rt_processor->cpu_id, next_rt_processor->state, nptype, 2);
2575 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2576 }
2577
2578 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2579 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 1);
2580 return thread;
2581 }
2582
2583 if ((rt_runq_count(pset) == 0) &&
2584 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2585 /* This thread is still the highest priority runnable (non-idle) thread */
2586 processor->deadline = RT_DEADLINE_NONE;
2587
2588 sched_update_pset_load_average(pset, 0);
2589
2590 clear_pending_AST_bits(pset, processor, 2);
2591
2592 pset_unlock(pset);
2593
2594 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2595 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 2);
2596 return thread;
2597 }
2598 } else {
2599 /*
2600 * This processor must context switch.
2601 * If it's due to a rebalance, we should aggressively find this thread a new home.
2602 */
2603 bool ast_rebalance = affinity_mismatch || bound_elsewhere || avoid_processor;
2604 #if CONFIG_SCHED_SMT
2605 ast_rebalance = ast_rebalance || needs_smt_rebalance;
2606 #endif /* CONFIG_SCHED_SMT */
2607 if (ast_rebalance) {
2608 *reason |= AST_REBALANCE;
2609 }
2610 }
2611
2612 #if CONFIG_SCHED_SMT
2613 bool secondary_forced_idle = ((processor->processor_secondary != PROCESSOR_NULL) &&
2614 (thread_no_smt(thread) || (thread->sched_pri >= BASEPRI_RTQUEUES)) &&
2615 (processor->processor_secondary->state == PROCESSOR_IDLE));
2616 #endif /* CONFIG_SCHED_SMT */
2617
2618 /* OK, so we're not going to run the current thread. Look at the RT queue. */
2619 if (ok_to_run_realtime_thread) {
2620 pick_new_rt_thread:
2621 /* sched_rt_choose_thread may drop and re-take the processor's pset lock. */
2622 new_thread = sched_rt_choose_thread(processor);
2623 pset_assert_locked(pset);
2624 if (new_thread != THREAD_NULL) {
2625 processor->deadline = new_thread->realtime.deadline;
2626 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2627
2628 clear_pending_AST_bits(pset, processor, 3);
2629
2630 #if CONFIG_SCHED_SMT
2631 if (processor->processor_secondary != NULL) {
2632 processor_t sprocessor = processor->processor_secondary;
2633 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2634 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2635 ast_processor = sprocessor;
2636 }
2637 }
2638 #endif /* CONFIG_SCHED_SMT */
2639 }
2640 }
2641
2642 send_followup_ipi_before_idle:
2643 /* This might not have been cleared if we didn't call sched_rt_choose_thread() */
2644 rt_clear_pending_spill(processor, 5);
2645 next_processor_type_t nptype = none;
2646 bool pset_unlocked = false;
2647 #if CONFIG_SCHED_EDGE
2648 if (rt_pset_has_stealable_threads(pset)) {
2649 nptype = spill;
2650 pset_unlocked = rt_choose_next_processor_for_spill_IPI(pset, processor, &next_rt_processor, &next_rt_ipi_type);
2651 }
2652 #endif /* CONFIG_SCHED_EDGE */
2653 if (nptype == none && rt_pset_needs_a_followup_IPI(pset)) {
2654 nptype = followup;
2655 rt_choose_next_processor_for_followup_IPI(pset, processor, &next_rt_processor, &next_rt_ipi_type);
2656 }
2657
2658 assert(new_thread || !ast_processor);
2659 if (new_thread || next_rt_processor) {
2660 if (!pset_unlocked) {
2661 pset_unlock(pset);
2662 pset_unlocked = true;
2663 }
2664 if (ast_processor == next_rt_processor) {
2665 ast_processor = PROCESSOR_NULL;
2666 ipi_type = SCHED_IPI_NONE;
2667 }
2668
2669 if (ast_processor) {
2670 sched_ipi_perform(ast_processor, ipi_type);
2671 }
2672
2673 if (next_rt_processor) {
2674 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2675 next_rt_processor->cpu_id, next_rt_processor->state, nptype, 3);
2676 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2677 }
2678
2679 if (new_thread) {
2680 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2681 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 3);
2682 return new_thread;
2683 }
2684 }
2685
2686 if (pset_unlocked) {
2687 pset_lock(pset);
2688 }
2689
2690 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2691 /* Things changed while we dropped the lock */
2692 goto restart;
2693 }
2694
2695 if (processor->is_recommended) {
2696 bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
2697 if (sched_ok_to_run_realtime_thread(pset, processor, true) && (spill_pending || rt_runq_count(pset))) {
2698 /* Things changed while we dropped the lock */
2699 goto restart;
2700 }
2701
2702 #if CONFIG_SCHED_SMT
2703 if ((processor->processor_primary != processor) && (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES)) {
2704 /* secondary can only run realtime thread */
2705 if (idle_reason == 0) {
2706 idle_reason = 4;
2707 }
2708 goto idle;
2709 }
2710 #endif /* CONFIG_SCHED_SMT */
2711 } else if (!SCHED(processor_bound_count)(processor)) {
2712 /* processor not recommended and no bound threads */
2713 if (idle_reason == 0) {
2714 idle_reason = 5;
2715 }
2716 goto idle;
2717 }
2718
2719 processor->deadline = RT_DEADLINE_NONE;
2720
2721 /* No RT threads, so let's look at the regular threads. */
2722 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, current_thread_can_keep_running ? thread : THREAD_NULL, *reason)) != THREAD_NULL) {
2723 if (new_thread != thread) {
2724 /* Going to context-switch */
2725 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2726
2727 clear_pending_AST_bits(pset, processor, 4);
2728
2729 ast_processor = PROCESSOR_NULL;
2730 ipi_type = SCHED_IPI_NONE;
2731
2732 #if CONFIG_SCHED_SMT
2733 processor_t sprocessor = processor->processor_secondary;
2734 if (sprocessor != NULL) {
2735 if (sprocessor->state == PROCESSOR_RUNNING) {
2736 if (thread_no_smt(new_thread)) {
2737 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2738 ast_processor = sprocessor;
2739 }
2740 } else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
2741 ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_PREEMPT);
2742 ast_processor = sprocessor;
2743 }
2744 }
2745 #endif /* CONFIG_SCHED_SMT */
2746
2747 pset_unlock(pset);
2748
2749 if (ast_processor) {
2750 sched_ipi_perform(ast_processor, ipi_type);
2751 }
2752 } else {
2753 /* Will continue running the current thread */
2754 clear_pending_AST_bits(pset, processor, 4);
2755 pset_unlock(pset);
2756 }
2757
2758 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2759 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 4);
2760 return new_thread;
2761 }
2762
2763 if (processor->must_idle) {
2764 processor->must_idle = false;
2765 *reason |= AST_REBALANCE;
2766 idle_reason = 6;
2767 goto idle;
2768 }
2769
2770 if (SCHED(steal_thread_enabled)(pset)
2771 #if CONFIG_SCHED_SMT
2772 && (processor->processor_primary == processor)
2773 #endif /* CONFIG_SCHED_SMT */
2774 ) {
2775 /*
2776 * No runnable threads, attempt to steal
2777 * from other processors. Returns with pset lock dropped.
2778 */
2779
2780 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2781 pset_lock(pset);
2782 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2783 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2784 /*
2785 * A realtime thread chose this processor while it was DISPATCHING
2786 * and the pset lock was dropped
2787 */
2788 ast_on(AST_URGENT | AST_PREEMPT);
2789 }
2790
2791 clear_pending_AST_bits(pset, processor, 5);
2792
2793 pset_unlock(pset);
2794
2795 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2796 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 5);
2797 return new_thread;
2798 }
2799
2800 /*
2801 * If other threads have appeared, shortcut
2802 * around again.
2803 */
2804 if (SCHED(processor_bound_count)(processor)) {
2805 continue;
2806 }
2807 if (processor->is_recommended) {
2808 if (!SCHED(processor_queue_empty)(processor) || (sched_ok_to_run_realtime_thread(pset, processor, true) && (rt_runq_count(pset) > 0))) {
2809 continue;
2810 }
2811 }
2812
2813 pset_lock(pset);
2814 }
2815
2816 idle:
2817 /* Someone selected this processor while we had dropped the lock */
2818 if ((!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) ||
2819 (!pending_AST_PREEMPT && bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id))) {
2820 goto restart;
2821 }
2822
2823 if ((idle_reason == 0) && current_thread_can_keep_running) {
2824 /* This thread is the only runnable (non-idle) thread */
2825 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2826 processor->deadline = thread->realtime.deadline;
2827 } else {
2828 processor->deadline = RT_DEADLINE_NONE;
2829 }
2830
2831 sched_update_pset_load_average(pset, 0);
2832
2833 clear_pending_AST_bits(pset, processor, 6);
2834
2835 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2836 (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 6);
2837 pset_unlock(pset);
2838 return thread;
2839 }
2840
2841 /*
2842 * Nothing is runnable, or this processor must be forced idle,
2843 * so set this processor idle if it was running.
2844 */
2845 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
2846 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
2847 processor_state_update_idle(processor);
2848 }
2849 pset_update_rt_stealable_state(pset);
2850
2851 clear_pending_AST_bits(pset, processor, 7);
2852
2853 /* Invoked with pset locked, returns with pset unlocked */
2854 processor->next_idle_short = SCHED(processor_balance)(processor, pset);
2855
2856 new_thread = processor->idle_thread;
2857 } while (new_thread == THREAD_NULL);
2858
2859 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2860 (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 10 + idle_reason);
2861 return new_thread;
2862 }
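
/*
 * Rough order of the decisions in thread_select() above: honor pending
 * urgent/preempt ASTs, decide whether this CPU may run realtime work at
 * all, let an eligible current thread keep running, otherwise pull from
 * the RT run queue, then the regular run queue, then attempt to steal
 * from other processors, and finally go idle via the idle thread.
 */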
2863
2864 /*
2865 * thread_invoke
2866 *
2867 * Called at splsched with neither thread locked.
2868 *
2869 * Perform a context switch and start executing the new thread.
2870 *
2871 * Returns FALSE when the context switch didn't happen.
2872 * The reference to the new thread is still consumed.
2873 *
2874 * "self" is what is currently running on the processor,
2875 * "thread" is the new thread to context switch to
2876 * (which may be the same thread in some cases)
2877 */
2878 static boolean_t
2879 thread_invoke(
2880 thread_t self,
2881 thread_t thread,
2882 ast_t reason)
2883 {
2884 if (__improbable(get_preemption_level() != 0)) {
2885 int pl = get_preemption_level();
2886 panic("thread_invoke: preemption_level %d, possible cause: %s",
2887 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2888 "blocking while holding a spinlock, or within interrupt context"));
2889 }
2890
2891 thread_continue_t continuation = self->continuation;
2892 void *parameter = self->parameter;
2893
2894 struct recount_snap snap = { 0 };
2895 recount_snapshot(&snap);
2896 uint64_t ctime = snap.rsn_time_mach;
2897
2898 check_monotonic_time(ctime);
2899
2900 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2901 commpage_update_mach_approximate_time(ctime);
2902 #endif
2903
2904 if (ctime < thread->last_made_runnable_time) {
2905 panic("Non-monotonic time: invoke at 0x%llx, runnable at 0x%llx",
2906 ctime, thread->last_made_runnable_time);
2907 }
2908
2909 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2910 if (!((thread->state & TH_IDLE) != 0 ||
2911 ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
2912 sched_timeshare_consider_maintenance(ctime, true);
2913 }
2914 #endif
2915
2916 recount_log_switch_thread(&snap);
2917
2918 processor_t processor = current_processor();
2919
2920 if (!processor->processor_online) {
2921 panic("Invalid attempt to context switch an offline processor");
2922 }
2923
2924 assert_thread_magic(self);
2925 assert(self == current_thread());
2926 thread_assert_runq_null(self);
2927 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2928
2929 thread_lock(thread);
2930
2931 assert_thread_magic(thread);
2932 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
2933 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor);
2934 thread_assert_runq_null(thread);
2935
2936 /* Update SFI class based on other factors */
2937 thread->sfi_class = sfi_thread_classify(thread);
2938
2939 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2940 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2941 /*
2942 * In case a base_pri update happened between the timestamp and
2943 * taking the thread lock
2944 */
2945 if (ctime <= thread->last_basepri_change_time) {
2946 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2947 }
2948
2949 /* Allow realtime threads to hang onto a stack. */
2950 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
2951 self->reserved_stack = self->kernel_stack;
2952 }
2953
2954 /* Prepare for spin debugging */
2955 #if SCHED_HYGIENE_DEBUG
2956 ml_spin_debug_clear(thread);
2957 #endif
2958
2959 if (continuation != NULL) {
2960 if (!thread->kernel_stack) {
2961 /*
2962 * If we are using a privileged stack,
2963 * check to see whether we can exchange it with
2964 * that of the other thread.
2965 */
2966 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
2967 goto need_stack;
2968 }
2969
2970 /*
2971 * Context switch by performing a stack handoff.
2972 * Requires both threads to be parked in a continuation.
2973 */
2974 continuation = thread->continuation;
2975 parameter = thread->parameter;
2976
2977 processor->active_thread = thread;
2978 processor_state_update_from_thread(processor, thread, false);
2979
2980 if (thread->last_processor != processor && thread->last_processor != NULL) {
2981 if (thread->last_processor->processor_set != processor->processor_set) {
2982 thread->ps_switch++;
2983 }
2984 thread->p_switch++;
2985 }
2986 thread->last_processor = processor;
2987 thread->c_switch++;
2988 ast_context(thread);
2989
2990 thread_unlock(thread);
2991
2992 self->reason = reason;
2993
2994 processor->last_dispatch = ctime;
2995 self->last_run_time = ctime;
2996 timer_update(&thread->runnable_timer, ctime);
2997 recount_switch_thread(&snap, self, get_threadtask(self));
2998
2999 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3000 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
3001 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3002
3003 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
3004 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3005 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3006 }
3007
3008 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3009
3010 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3011
3012 #if KPERF
3013 kperf_off_cpu(self);
3014 #endif /* KPERF */
3015
3016 /*
3017 * This is where we actually switch thread identity,
3018 * and address space if required. However, register
3019 * state is not switched - this routine leaves the
3020 * stack and register state active on the current CPU.
3021 */
3022 TLOG(1, "thread_invoke: calling stack_handoff\n");
3023 stack_handoff(self, thread);
3024
3025 /* 'self' is now off core */
3026 assert(thread == current_thread_volatile());
3027
3028 DTRACE_SCHED(on__cpu);
3029
3030 #if KPERF
3031 kperf_on_cpu(thread, continuation, NULL);
3032 #endif /* KPERF */
3033
3034
3035 recount_log_switch_thread_on(&snap);
3036
3037 thread_dispatch(self, thread);
3038
3039 #if KASAN
3040 /* Old thread's stack has been moved to the new thread, so explicitly
3041 * unpoison it. */
3042 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3043 #endif
3044
3045 thread->continuation = thread->parameter = NULL;
3046
3047 boolean_t enable_interrupts = TRUE;
3048
3049 /* idle thread needs to stay interrupts-disabled */
3050 if ((thread->state & TH_IDLE)) {
3051 enable_interrupts = FALSE;
3052 }
3053
3054 assert(continuation);
3055 call_continuation(continuation, parameter,
3056 thread->wait_result, enable_interrupts);
3057 /*NOTREACHED*/
3058 } else if (thread == self) {
3059 /* same thread but with continuation */
3060 ast_context(self);
3061
3062 thread_unlock(self);
3063
3064 #if KPERF
3065 kperf_on_cpu(thread, continuation, NULL);
3066 #endif /* KPERF */
3067
3068 recount_log_switch_thread_on(&snap);
3069
3070 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3071 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3072 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3073
3074 #if KASAN
3075 /* stack handoff to self - no thread_dispatch(), so clear the stack
3076 * and free the fakestack directly */
3077 #if KASAN_CLASSIC
3078 kasan_fakestack_drop(self);
3079 kasan_fakestack_gc(self);
3080 #endif /* KASAN_CLASSIC */
3081 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3082 #endif /* KASAN */
3083
3084 self->continuation = self->parameter = NULL;
3085
3086 boolean_t enable_interrupts = TRUE;
3087
3088 /* idle thread needs to stay interrupts-disabled */
3089 if ((self->state & TH_IDLE)) {
3090 enable_interrupts = FALSE;
3091 }
3092
3093 call_continuation(continuation, parameter,
3094 self->wait_result, enable_interrupts);
3095 /*NOTREACHED*/
3096 }
3097 } else {
3098 /*
3099 * Check that the other thread has a stack
3100 */
3101 if (!thread->kernel_stack) {
3102 need_stack:
3103 if (!stack_alloc_try(thread)) {
3104 thread_unlock(thread);
3105 thread_stack_enqueue(thread);
3106 return FALSE;
3107 }
3108 } else if (thread == self) {
3109 ast_context(self);
3110 thread_unlock(self);
3111
3112 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3113 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3114 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3115
3116 return TRUE;
3117 }
3118 }
3119
3120 /*
3121 * Context switch by full context save.
3122 */
3123 processor->active_thread = thread;
3124 processor_state_update_from_thread(processor, thread, false);
3125
3126 if (thread->last_processor != processor && thread->last_processor != NULL) {
3127 if (thread->last_processor->processor_set != processor->processor_set) {
3128 thread->ps_switch++;
3129 }
3130 thread->p_switch++;
3131 }
3132 thread->last_processor = processor;
3133 thread->c_switch++;
3134 ast_context(thread);
3135
3136 thread_unlock(thread);
3137
3138 self->reason = reason;
3139
3140 processor->last_dispatch = ctime;
3141 self->last_run_time = ctime;
3142 timer_update(&thread->runnable_timer, ctime);
3143 recount_switch_thread(&snap, self, get_threadtask(self));
3144
3145 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3146 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3147 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3148
3149 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
3150 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3151 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3152 }
3153
3154 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3155
3156 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3157
3158 #if KPERF
3159 kperf_off_cpu(self);
3160 #endif /* KPERF */
3161
3162 /*
3163 * This is where we actually switch register context,
3164 * and address space if required. We will next run
3165 * as a result of a subsequent context switch.
3166 *
3167 * Once registers are switched and the processor is running "thread",
3168 * the stack variables and non-volatile registers will contain whatever
3169 * was there the last time that thread blocked. No local variables should
3170 * be used after this point, except for the special case of "thread", which
3171 * the platform layer returns as the previous thread running on the processor
3172 * via the function call ABI as a return register, and "self", which may have
3173 * been stored on the stack or a non-volatile register, but a stale idea of
3174 * what was on the CPU is newly-accurate because that thread is again
3175 * running on the CPU.
3176 *
3177 * If one of the threads is using a continuation, thread_continue
3178 * is used to stitch up its context.
3179 *
3180 * If we are invoking a thread which is resuming from a continuation,
3181 * the CPU will invoke thread_continue next.
3182 *
3183 * If the current thread is parking in a continuation, then its state
3184 * won't be saved and the stack will be discarded. When the stack is
3185 * re-allocated, it will be configured to resume from thread_continue.
3186 */
3187
3188 assert(continuation == self->continuation);
3189 thread = machine_switch_context(self, continuation, thread);
3190 assert(self == current_thread_volatile());
3191 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
3192
3193 assert(continuation == NULL && self->continuation == NULL);
3194
3195 DTRACE_SCHED(on__cpu);
3196
3197 #if KPERF
3198 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
3199 #endif /* KPERF */
3200
3201
3202 /* Previous snap on the old stack is gone. */
3203 recount_log_switch_thread_on(NULL);
3204
3205 /* We have been resumed and are set to run. */
3206 thread_dispatch(thread, self);
3207
3208 return TRUE;
3209 }
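
/*
 * Paths through thread_invoke() above: a stack handoff when both threads
 * are parked in continuations (ends in call_continuation() and does not
 * return here), an early return when switching to the same thread, a
 * FALSE return when a kernel stack cannot be allocated for the target,
 * and the full machine_switch_context() path, which resumes this frame
 * the next time "self" runs.
 */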
3210
3211 #if defined(CONFIG_SCHED_DEFERRED_AST)
3212 /*
3213 * pset_cancel_deferred_dispatch:
3214 *
3215 * Cancels all ASTs that we can cancel for the given processor set
3216 * if the current processor is running the last runnable thread in the
3217 * system.
3218 *
3219 * This function assumes the current thread is runnable. This must
3220 * be called with the pset unlocked.
3221 */
3222 static void
3223 pset_cancel_deferred_dispatch(
3224 processor_set_t pset,
3225 processor_t processor)
3226 {
3227 processor_t active_processor = NULL;
3228 uint32_t sampled_sched_run_count;
3229
3230 pset_lock(pset);
3231 sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
3232
3233 /*
3234 * If we have emptied the run queue, and our current thread is runnable, we
3235 * should tell any processors that are still DISPATCHING that they will
3236 * probably not have any work to do. In the event that there are no
3237 * pending signals that we can cancel, this is also uninteresting.
3238 *
3239 * In the unlikely event that another thread becomes runnable while we are
3240 * doing this (sched_run_count is atomically updated, not guarded), the
3241 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
3242 * in order to dispatch it to a processor in our pset. So, the other
3243 * codepath will wait while we squash all cancelable ASTs, get the pset
3244 * lock, and then dispatch the freshly runnable thread. So this should be
3245 * correct (we won't accidentally have a runnable thread that hasn't been
3246 * dispatched to an idle processor), if not ideal (we may be restarting the
3247 * dispatch process, which could have some overhead).
3248 */
3249
3250 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
3251 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
3252 pset->pending_deferred_AST_cpu_mask &
3253 ~pset->pending_AST_URGENT_cpu_mask);
3254 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
3255 active_processor = processor_array[cpuid];
3256 /*
3257 * If a processor is DISPATCHING, it could be because of
3258 * a cancelable signal.
3259 *
3260 * IF the processor is not our
3261 * current processor (the current processor should not
3262 * be DISPATCHING, so this is a bit paranoid), AND there
3263 * is a cancelable signal pending on the processor, AND
3264 * there is no non-cancelable signal pending (as there is
3265 * no point trying to backtrack on bringing the processor
3266 * up if a signal we cannot cancel is outstanding), THEN
3267 * it should make sense to roll back the processor state
3268 * to the IDLE state.
3269 *
3270 * If the racy nature of this approach (as the signal
3271 * will be arbitrated by hardware, and can fire as we
3272 * roll back state) results in the core responding
3273 * despite being pushed back to the IDLE state, it
3274 * should be no different than if the core took some
3275 * interrupt while IDLE.
3276 */
3277 if (active_processor != processor) {
3278 /*
3279 * Squash all of the processor state back to some
3280 * reasonable facsimile of PROCESSOR_IDLE.
3281 */
3282
3283 processor_state_update_idle(active_processor);
3284 active_processor->deadline = RT_DEADLINE_NONE;
3285 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
3286 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3287 machine_signal_idle_cancel(active_processor);
3288 }
3289 }
3290 }
3291
3292 pset_unlock(pset);
3293 }
3294 #else
3295 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
3296 #endif
3297
3298 static void
3299 thread_csw_callout(
3300 thread_t old,
3301 thread_t new,
3302 uint64_t timestamp)
3303 {
3304 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
3305 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
3306 machine_switch_perfcontrol_context(event, timestamp, 0,
3307 same_pri_latency, old, new);
3308 }
3309
3310
3311 /*
3312 * thread_dispatch:
3313 *
3314 * Handle threads at context switch. Re-dispatch other thread
3315 * if still running, otherwise update run state and perform
3316 * special actions. Update quantum for other thread and begin
3317 * the quantum for ourselves.
3318 *
3319 * "thread" is the old thread that we have switched away from.
3320 * "self" is the new current thread that we have context switched to
3321 *
3322 * Called at splsched.
3323 *
3324 */
3325 void
3326 thread_dispatch(
3327 thread_t thread,
3328 thread_t self)
3329 {
3330 processor_t processor = self->last_processor;
3331 bool was_idle = false;
3332 bool processor_bootstrap = (thread == THREAD_NULL);
3333
3334 assert(processor == current_processor());
3335 assert(self == current_thread_volatile());
3336 assert(thread != self);
3337
3338 if (thread != THREAD_NULL) {
3339 /*
3340 * Do the perfcontrol callout for context switch.
3341 * The reason we do this here is:
3342 * - thread_dispatch() is called from various places that are not
3343 * the direct context switch path, e.g. processor shutdown.
3344 * So adding the callout here covers all those cases.
3345 * - We want this callout as early as possible to be close
3346 * to the timestamp taken in thread_invoke()
3347 * - We want to avoid holding the thread lock while doing the
3348 * callout
3349 * - We do not want to callout if "thread" is NULL.
3350 */
3351 thread_csw_callout(thread, self, processor->last_dispatch);
3352
3353 #if KASAN
3354 if (thread->continuation != NULL) {
3355 /*
3356 * Thread has a continuation and the normal stack is going away.
3357 * Unpoison the stack and mark all fakestack objects as unused.
3358 */
3359 #if KASAN_CLASSIC
3360 kasan_fakestack_drop(thread);
3361 #endif /* KASAN_CLASSIC */
3362 if (thread->kernel_stack) {
3363 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3364 }
3365 }
3366
3367
3368 #if KASAN_CLASSIC
3369 /*
3370 * Free all unused fakestack objects.
3371 */
3372 kasan_fakestack_gc(thread);
3373 #endif /* KASAN_CLASSIC */
3374 #endif /* KASAN */
3375
3376 /*
3377 * If blocked at a continuation, discard
3378 * the stack.
3379 */
3380 if (thread->continuation != NULL && thread->kernel_stack != 0) {
3381 stack_free(thread);
3382 }
3383
3384 if (thread->state & TH_IDLE) {
3385 was_idle = true;
3386 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3387 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3388 (uintptr_t)thread_tid(thread), 0, thread->state,
3389 sched_run_buckets[TH_BUCKET_RUN], 0);
3390 } else {
3391 int64_t consumed;
3392 int64_t remainder = 0;
3393
3394 if (processor->quantum_end > processor->last_dispatch) {
3395 remainder = processor->quantum_end -
3396 processor->last_dispatch;
3397 }
3398
3399 consumed = thread->quantum_remaining - remainder;
3400
3401 if ((thread->reason & AST_LEDGER) == 0) {
3402 /*
3403 * Bill CPU time to both the task and
3404 * the individual thread.
3405 */
3406 ledger_credit_thread(thread, thread->t_ledger,
3407 task_ledgers.cpu_time, consumed);
3408 ledger_credit_thread(thread, thread->t_threadledger,
3409 thread_ledgers.cpu_time, consumed);
3410 if (thread->t_bankledger) {
3411 ledger_credit_thread(thread, thread->t_bankledger,
3412 bank_ledgers.cpu_time,
3413 (consumed - thread->t_deduct_bank_ledger_time));
3414 }
3415 thread->t_deduct_bank_ledger_time = 0;
3416 if (consumed > 0) {
3417 /*
3418 * This should never be negative, but in traces we are seeing some instances
3419 * of consumed being negative.
3420 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
3421 */
3422 sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
3423 }
3424 }
3425
3426 /* For the thread that we just context switched away from, figure
3427 * out if we have expired the workqueue quantum and set the AST if we have.
3428 */
3429 if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
3430 thread_evaluate_workqueue_quantum_expiry(thread);
3431 }
3432
3433 if (__improbable(thread->rwlock_count != 0)) {
3434 smr_mark_active_trackers_stalled(thread);
3435 }
3436
3437 /*
3438 * Pairs with task_restartable_ranges_synchronize
3439 */
3440 wake_lock(thread);
3441 thread_lock(thread);
3442
3443 /*
3444 * Same as ast_check(), in case we missed the IPI
3445 */
3446 thread_reset_pcs_ack_IPI(thread);
3447
3448 /*
3449 * Apply a priority floor if the thread holds a kernel resource
3450 * or explicitly requested it.
3451 * Do this before checking starting_pri to avoid overpenalizing
3452 * repeated rwlock blockers.
3453 */
3454 if (__improbable(thread->rwlock_count != 0)) {
3455 lck_rw_set_promotion_locked(thread);
3456 }
3457 if (__improbable(thread->priority_floor_count != 0)) {
3458 thread_floor_boost_set_promotion_locked(thread);
3459 }
3460
3461 boolean_t keep_quantum = processor->first_timeslice;
3462
3463 /*
3464 * Treat a thread which has dropped priority since it got on core
3465 * as having expired its quantum.
3466 */
3467 if (processor->starting_pri > thread->sched_pri) {
3468 keep_quantum = FALSE;
3469 }
3470
3471 /* Compute remainder of current quantum. */
3472 if (keep_quantum &&
3473 processor->quantum_end > processor->last_dispatch) {
3474 thread->quantum_remaining = (uint32_t)remainder;
3475 } else {
3476 thread->quantum_remaining = 0;
3477 }
3478
3479 if (thread->sched_mode == TH_MODE_REALTIME) {
3480 /*
3481 * Cancel the deadline if the thread has
3482 * consumed the entire quantum.
3483 */
3484 if (thread->quantum_remaining == 0) {
3485 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
3486 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
3487 thread->realtime.deadline = RT_DEADLINE_QUANTUM_EXPIRED;
3488 }
3489 } else {
3490 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3491 /*
3492 * For non-realtime threads, treat a tiny
3493 * remaining quantum as an expired quantum
3494 * but include what's left next time.
3495 */
3496 if (thread->quantum_remaining < min_std_quantum) {
3497 thread->reason |= AST_QUANTUM;
3498 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
3499 }
3500 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3501 }
3502
3503 /*
3504 * If we are doing a direct handoff then
3505 * take the remainder of the quantum.
3506 */
3507 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
3508 self->quantum_remaining = thread->quantum_remaining;
3509 thread->reason |= AST_QUANTUM;
3510 thread->quantum_remaining = 0;
3511 }
3512
3513 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
3514
3515 if (!(thread->state & TH_WAIT)) {
3516 /*
3517 * Still runnable.
3518 */
3519 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3520
3521 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
3522
3523 ast_t reason = thread->reason;
3524 sched_options_t options = SCHED_NONE;
3525
3526 if (reason & AST_REBALANCE) {
3527 options |= SCHED_REBALANCE;
3528 if (reason & AST_QUANTUM) {
3529 /*
3530 * Having gone to the trouble of forcing this thread off a less preferred core,
3531 * we should force the preferable core to reschedule immediately to give this
3532 * thread a chance to run instead of just sitting on the run queue where
3533 * it may just be stolen back by the idle core we just forced it off.
3534 * But only do this at the end of a quantum to prevent cascading effects.
3535 */
3536 options |= SCHED_STIR_POT;
3537 }
3538 }
3539
3540 if (reason & AST_QUANTUM) {
3541 options |= SCHED_TAILQ;
3542 } else if (reason & AST_PREEMPT) {
3543 options |= SCHED_HEADQ;
3544 } else {
3545 options |= (SCHED_PREEMPT | SCHED_TAILQ);
3546 }
3547
3548 thread_setrun(thread, options);
3549
3550 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3551 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3552 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
3553 sched_run_buckets[TH_BUCKET_RUN], 0);
3554
3555 if (thread->wake_active) {
3556 thread->wake_active = FALSE;
3557 thread_unlock(thread);
3558
3559 thread_wakeup(&thread->wake_active);
3560 } else {
3561 thread_unlock(thread);
3562 }
3563
3564 wake_unlock(thread);
3565 } else {
3566 /*
3567 * Waiting.
3568 */
3569 boolean_t should_terminate = FALSE;
3570 uint32_t new_run_count;
3571 int thread_state = thread->state;
3572
3573 /* Only the first call to thread_dispatch
3574 * after explicit termination should add
3575 * the thread to the termination queue
3576 */
3577 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
3578 should_terminate = TRUE;
3579 thread_state |= TH_TERMINATE2;
3580 }
3581
3582 timer_stop(&thread->runnable_timer, processor->last_dispatch);
3583
3584 thread_state &= ~TH_RUN;
3585 thread->state = thread_state;
3586
3587 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
3588 thread->chosen_processor = PROCESSOR_NULL;
3589
3590 new_run_count = SCHED(run_count_decr)(thread);
3591
3592 #if CONFIG_SCHED_AUTO_JOIN
3593 if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
3594 work_interval_auto_join_unwind(thread);
3595 }
3596 #endif /* CONFIG_SCHED_AUTO_JOIN */
3597
3598 #if CONFIG_SCHED_SFI
3599 if (thread->reason & AST_SFI) {
3600 thread->wait_sfi_begin_time = processor->last_dispatch;
3601 }
3602 #endif
3603 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
3604
3605 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3606 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3607 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
3608 new_run_count, 0);
3609
3610 if (thread_state & TH_WAIT_REPORT) {
3611 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
3612 }
3613
3614 if (thread->wake_active) {
3615 thread->wake_active = FALSE;
3616 thread_unlock(thread);
3617
3618 thread_wakeup(&thread->wake_active);
3619 } else {
3620 thread_unlock(thread);
3621 }
3622
3623 wake_unlock(thread);
3624
3625 if (should_terminate) {
3626 thread_terminate_enqueue(thread);
3627 }
3628 }
3629 }
3630 /*
3631 * The thread could have been added to the termination queue, so it's
3632 * unsafe to use after this point.
3633 */
3634 thread = THREAD_NULL;
3635 }
3636
3637 int urgency = THREAD_URGENCY_NONE;
3638 uint64_t latency = 0;
3639
3640 /* Update (new) current thread and reprogram running timers */
3641 thread_lock(self);
3642
3643 if (!(self->state & TH_IDLE)) {
3644 uint64_t arg1, arg2;
3645
3646 #if CONFIG_SCHED_SFI
3647 ast_t new_ast;
3648
3649 new_ast = sfi_thread_needs_ast(self, NULL);
3650
3651 if (new_ast != AST_NONE) {
3652 ast_on(new_ast);
3653 }
3654 #endif
3655
3656 if (processor->last_dispatch < self->last_made_runnable_time) {
3657 panic("Non-monotonic time: dispatch at 0x%llx, runnable at 0x%llx",
3658 processor->last_dispatch, self->last_made_runnable_time);
3659 }
3660
3661 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3662
3663 latency = processor->last_dispatch - self->last_made_runnable_time;
3664 assert(latency >= self->same_pri_latency);
3665
3666 urgency = thread_get_urgency(self, &arg1, &arg2);
3667
3668 thread_tell_urgency(urgency, arg1, arg2, latency, self);
3669
3670 /*
3671 * Start a new CPU limit interval if the previous one has
3672 * expired. This should happen before initializing a new
3673 * quantum.
3674 */
3675 if (cpulimit_affects_quantum &&
3676 thread_cpulimit_interval_has_expired(processor->last_dispatch)) {
3677 thread_cpulimit_restart(processor->last_dispatch);
3678 }
3679
3680 /*
3681 * Get a new quantum if none remaining.
3682 */
3683 if (self->quantum_remaining == 0) {
3684 thread_quantum_init(self, processor->last_dispatch);
3685 }
3686
3687 /*
3688 * Set up quantum timer and timeslice.
3689 */
3690 processor->quantum_end = processor->last_dispatch +
3691 self->quantum_remaining;
3692
3693 running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
3694 processor->quantum_end, processor->last_dispatch);
3695 if (was_idle) {
3696 /*
3697 * kperf's running timer is active whenever the idle thread for a
3698 * CPU is not running.
3699 */
3700 kperf_running_setup(processor, processor->last_dispatch);
3701 }
3702 running_timers_activate(processor);
3703 processor->first_timeslice = TRUE;
3704 } else {
3705 if (!processor_bootstrap) {
3706 running_timers_deactivate(processor);
3707 }
3708 processor->first_timeslice = FALSE;
3709 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
3710 }
3711
3712 assert(self->block_hint == kThreadWaitNone);
3713 self->computation_epoch = processor->last_dispatch;
3714 /*
3715 * This relies on the interrupt time being tallied up to the thread in the
3716 * exception handler epilogue, which is before AST context where preemption
3717 * is considered (and the scheduler is potentially invoked to
3718 * context switch, here).
3719 */
3720 self->computation_interrupt_epoch = recount_current_thread_interrupt_time_mach();
3721 self->reason = AST_NONE;
3722 processor->starting_pri = self->sched_pri;
3723
3724 thread_unlock(self);
3725
3726 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
3727 processor->last_dispatch);
3728
3729 #if defined(CONFIG_SCHED_DEFERRED_AST)
3730 /*
3731 * TODO: Can we state that redispatching our old thread is also
3732 * uninteresting?
3733 */
3734 if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
3735 pset_cancel_deferred_dispatch(processor->processor_set, processor);
3736 }
3737 #endif
3738 }
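/*
 * Condensed sketch of the decision thread_dispatch() makes for the outgoing
 * thread (illustrative summary only; see the code above for the exact
 * conditions and locking):
 *
 *	old == THREAD_NULL            -> processor bootstrap, nothing to dispatch
 *	old->state & TH_IDLE          -> trace only; the idle thread is never enqueued
 *	old still runnable (!TH_WAIT) -> bill CPU time, recompute its quantum,
 *	                                 thread_setrun() it with head/tail options
 *	old is waiting (TH_WAIT)      -> clear TH_RUN, drop the run count, and
 *	                                 possibly thread_terminate_enqueue() it
 *
 * In every case the incoming thread ("self") then has its urgency reported,
 * a fresh quantum installed if needed, and the quantum running timer re-armed.
 */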
3739
3740 /*
3741 * thread_block_reason:
3742 *
3743 * Forces a reschedule, blocking the caller if a wait
3744 * has been asserted.
3745 *
3746 * If a continuation is specified, then thread_invoke will
3747 * attempt to discard the thread's kernel stack. When the
3748 * thread resumes, it will execute the continuation function
3749 * on a new kernel stack.
3750 */
3751 __mockable wait_result_t
3752 thread_block_reason(
3753 thread_continue_t continuation,
3754 void *parameter,
3755 ast_t reason)
3756 {
3757 thread_t self = current_thread();
3758 processor_t processor;
3759 thread_t new_thread;
3760 spl_t s;
3761
3762 s = splsched();
3763
3764 processor = current_processor();
3765
3766 /* If we're explicitly yielding, force a subsequent quantum */
3767 if (reason & AST_YIELD) {
3768 processor->first_timeslice = FALSE;
3769 }
3770
3771 /* We're handling all scheduling ASTs */
3772 ast_off(AST_SCHEDULING);
3773
3774 clear_pending_nonurgent_preemption(processor);
3775
3776 #if PROC_REF_DEBUG
3777 if ((continuation != NULL) && (get_threadtask(self) != kernel_task)) {
3778 uthread_assert_zero_proc_refcount(get_bsdthread_info(self));
3779 }
3780 #endif
3781
3782 #if CONFIG_EXCLAVES
3783 if (continuation != NULL) {
3784 assert3u(self->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
3785 }
3786 #endif /* CONFIG_EXCLAVES */
3787
3788 self->continuation = continuation;
3789 self->parameter = parameter;
3790
3791 if (self->state & ~(TH_RUN | TH_IDLE)) {
3792 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3793 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3794 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
3795 }
3796
3797 do {
3798 thread_lock(self);
3799 new_thread = thread_select(self, processor, &reason);
3800 thread_unlock(self);
3801 } while (!thread_invoke(self, new_thread, reason));
3802
3803 splx(s);
3804
3805 return self->wait_result;
3806 }
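/*
 * A minimal sketch of how a caller typically uses the continuation form
 * (hypothetical function names; only assert_wait()/thread_block_parameter()
 * are real interfaces):
 *
 *	static void example_continuation(void *param, wait_result_t wr);
 *
 *	static void
 *	example_wait_for_event(void *event)
 *	{
 *		assert_wait((event_t)event, THREAD_UNINT);
 *		(void) thread_block_parameter(example_continuation, event);
 *		panic("not reached - resumed in example_continuation on a new stack");
 *	}
 *
 * Because the continuation is recorded in self->continuation before
 * thread_invoke(), the kernel stack can be reclaimed while the thread waits.
 */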
3807
3808 /*
3809 * thread_block:
3810 *
3811 * Block the current thread if a wait has been asserted.
3812 */
3813 wait_result_t
3814 thread_block(
3815 thread_continue_t continuation)
3816 {
3817 return thread_block_reason(continuation, NULL, AST_NONE);
3818 }
3819
3820 wait_result_t
3821 thread_block_parameter(
3822 thread_continue_t continuation,
3823 void *parameter)
3824 {
3825 return thread_block_reason(continuation, parameter, AST_NONE);
3826 }
3827
3828 /*
3829 * thread_run:
3830 *
3831 * Switch directly from the current thread to the
3832 * new thread, handing off our quantum if appropriate.
3833 *
3834 * New thread must be runnable, and not on a run queue.
3835 *
3836 * Called at splsched.
3837 */
3838 int
3839 thread_run(
3840 thread_t self,
3841 thread_continue_t continuation,
3842 void *parameter,
3843 thread_t new_thread)
3844 {
3845 ast_t reason = AST_NONE;
3846
3847 if ((self->state & TH_IDLE) == 0) {
3848 reason = AST_HANDOFF;
3849 }
3850
3851 /* Must not get here without a chosen processor */
3852 assert(new_thread->chosen_processor);
3853
3854 self->continuation = continuation;
3855 self->parameter = parameter;
3856
3857 while (!thread_invoke(self, new_thread, reason)) {
3858 /* the handoff failed, so we have to fall back to the normal block path */
3859 processor_t processor = current_processor();
3860
3861 reason = AST_NONE;
3862
3863 thread_lock(self);
3864 new_thread = thread_select(self, processor, &reason);
3865 thread_unlock(self);
3866 }
3867
3868 return self->wait_result;
3869 }
3870
3871 /*
3872 * thread_continue:
3873 *
3874 * Called at splsched when a thread first receives
3875 * a new stack after a continuation.
3876 *
3877 * Called with THREAD_NULL as the old thread when
3878 * invoked by machine_load_context.
3879 */
3880 void
3881 thread_continue(
3882 thread_t thread)
3883 {
3884 thread_t self = current_thread();
3885 thread_continue_t continuation;
3886 void *parameter;
3887
3888 DTRACE_SCHED(on__cpu);
3889
3890 continuation = self->continuation;
3891 parameter = self->parameter;
3892
3893 assert(continuation != NULL);
3894
3895 #if KPERF
3896 kperf_on_cpu(self, continuation, NULL);
3897 #endif
3898
3899
3900 thread_dispatch(thread, self);
3901
3902 self->continuation = self->parameter = NULL;
3903
3904 #if SCHED_HYGIENE_DEBUG
3905 /* Reset interrupt-masked spin debugging timeout */
3906 ml_spin_debug_clear(self);
3907 #endif
3908
3909 TLOG(1, "thread_continue: calling call_continuation\n");
3910
3911 boolean_t enable_interrupts = TRUE;
3912
3913 /* bootstrap thread, idle thread need to stay interrupts-disabled */
3914 if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
3915 enable_interrupts = FALSE;
3916 }
3917
3918 #if KASAN_TBI
3919 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3920 #endif /* KASAN_TBI */
3921
3922
3923 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
3924 /*NOTREACHED*/
3925 }
3926
3927 void
3928 thread_quantum_init(thread_t thread, uint64_t now)
3929 {
3930 uint64_t new_quantum = 0;
3931
3932 switch (thread->sched_mode) {
3933 case TH_MODE_REALTIME:
3934 new_quantum = thread->realtime.computation;
3935 new_quantum = MIN(new_quantum, max_unsafe_rt_computation);
3936 break;
3937
3938 case TH_MODE_FIXED:
3939 new_quantum = SCHED(initial_quantum_size)(thread);
3940 new_quantum = MIN(new_quantum, max_unsafe_fixed_computation);
3941 break;
3942
3943 default:
3944 new_quantum = SCHED(initial_quantum_size)(thread);
3945 break;
3946 }
3947
3948 if (cpulimit_affects_quantum) {
3949 const uint64_t cpulimit_remaining = thread_cpulimit_remaining(now);
3950
3951 /*
3952 * If there's no remaining CPU time, the ledger system will
3953 * notice and put the thread to sleep.
3954 */
3955 if (cpulimit_remaining > 0) {
3956 new_quantum = MIN(new_quantum, cpulimit_remaining);
3957 }
3958 }
3959
3960 assert3u(new_quantum, <, UINT32_MAX);
3961 assert3u(new_quantum, >, 0);
3962
3963 thread->quantum_remaining = (uint32_t)new_quantum;
3964 }
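/*
 * Worked example with illustrative numbers: a timeshare thread whose bucket
 * quantum is 10ms (in mach absolute time) normally gets
 * quantum_remaining = 10ms. With cpulimit_affects_quantum set and only 4ms
 * left in the current CPU-limit interval, the quantum is clamped to 4ms so
 * the ledger check lands near the limit boundary. If the interval is already
 * exhausted (0 remaining), the full 10ms is kept and the ledger system is
 * relied on to put the thread to sleep.
 */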
3965
3966 uint32_t
3967 sched_timeshare_initial_quantum_size(thread_t thread)
3968 {
3969 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
3970 return bg_quantum;
3971 } else {
3972 return std_quantum;
3973 }
3974 }
3975
3976 /*
3977 * run_queue_init:
3978 *
3979 * Initialize a run queue before first use.
3980 */
3981 void
3982 run_queue_init(
3983 run_queue_t rq)
3984 {
3985 rq->highq = NOPRI;
3986 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
3987 rq->bitmap[i] = 0;
3988 }
3989 rq->urgency = rq->count = 0;
3990 for (int i = 0; i < NRQS; i++) {
3991 circle_queue_init(&rq->queues[i]);
3992 }
3993 }
3994
3995 /*
3996 * run_queue_dequeue:
3997 *
3998 * Perform a dequeue operation on a run queue,
3999 * and return the resulting thread.
4000 *
4001 * The run queue must be locked (see thread_run_queue_remove()
4002 * for more info), and not empty.
4003 */
4004 thread_t
4005 run_queue_dequeue(
4006 run_queue_t rq,
4007 sched_options_t options)
4008 {
4009 thread_t thread;
4010 circle_queue_t queue = &rq->queues[rq->highq];
4011
4012 if (options & SCHED_HEADQ) {
4013 thread = cqe_dequeue_head(queue, struct thread, runq_links);
4014 } else {
4015 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
4016 }
4017
4018 assert(thread != THREAD_NULL);
4019 assert_thread_magic(thread);
4020
4021 thread_clear_runq(thread);
4022 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4023 rq->count--;
4024 if (SCHED(priority_is_urgent)(rq->highq)) {
4025 rq->urgency--; assert(rq->urgency >= 0);
4026 }
4027 if (circle_queue_empty(queue)) {
4028 bitmap_clear(rq->bitmap, rq->highq);
4029 rq->highq = bitmap_first(rq->bitmap, NRQS);
4030 }
4031
4032 return thread;
4033 }
4034
4035 /*
4036 * run_queue_enqueue:
4037 *
4038 * Perform an enqueue operation on a run queue.
4039 *
4040 * The run queue must be locked (see thread_run_queue_remove()
4041 * for more info).
4042 */
4043 boolean_t
4044 run_queue_enqueue(
4045 run_queue_t rq,
4046 thread_t thread,
4047 sched_options_t options)
4048 {
4049 circle_queue_t queue = &rq->queues[thread->sched_pri];
4050 boolean_t result = FALSE;
4051
4052 assert_thread_magic(thread);
4053
4054 if (circle_queue_empty(queue)) {
4055 circle_enqueue_tail(queue, &thread->runq_links);
4056
4057 rq_bitmap_set(rq->bitmap, thread->sched_pri);
4058 if (thread->sched_pri > rq->highq) {
4059 rq->highq = thread->sched_pri;
4060 result = TRUE;
4061 }
4062 } else {
4063 if (options & SCHED_TAILQ) {
4064 circle_enqueue_tail(queue, &thread->runq_links);
4065 } else {
4066 circle_enqueue_head(queue, &thread->runq_links);
4067 }
4068 }
4069 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4070 rq->urgency++;
4071 }
4072 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4073 rq->count++;
4074
4075 return result;
4076 }
4077
4078 /*
4079 * run_queue_remove:
4080 *
4081 * Remove a specific thread from a runqueue.
4082 *
4083 * The run queue must be locked.
4084 */
4085 void
4086 run_queue_remove(
4087 run_queue_t rq,
4088 thread_t thread)
4089 {
4090 circle_queue_t queue = &rq->queues[thread->sched_pri];
4091
4092 thread_assert_runq_nonnull(thread);
4093 assert_thread_magic(thread);
4094
4095 circle_dequeue(queue, &thread->runq_links);
4096 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4097 rq->count--;
4098 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4099 rq->urgency--; assert(rq->urgency >= 0);
4100 }
4101
4102 if (circle_queue_empty(queue)) {
4103 /* update run queue status */
4104 bitmap_clear(rq->bitmap, thread->sched_pri);
4105 rq->highq = bitmap_first(rq->bitmap, NRQS);
4106 }
4107
4108 thread_clear_runq(thread);
4109 }
4110
4111 /*
4112 * run_queue_peek
4113 *
4114 * Peek at the runq and return the highest
4115 * priority thread from the runq.
4116 *
4117 * The run queue must be locked.
4118 */
4119 thread_t
4120 run_queue_peek(
4121 run_queue_t rq)
4122 {
4123 if (rq->count > 0) {
4124 circle_queue_t queue = &rq->queues[rq->highq];
4125 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
4126 assert_thread_magic(thread);
4127 return thread;
4128 } else {
4129 return THREAD_NULL;
4130 }
4131 }
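/*
 * The run queue primitives above pair a per-priority circular queue with a
 * bitmap of occupied priorities, so the highest runnable priority is found
 * with a find-first-set rather than a scan of all levels. A minimal
 * user-space model of the same idea (hypothetical names, not the kernel
 * types) would be:
 *
 *	struct mini_rq {
 *		uint64_t          occupied;    bit p set means level p is non-empty
 *		struct mini_queue levels[64];  one FIFO per priority level
 *	};
 *
 *	enqueue(rq, pri):  push onto levels[pri]; set bit pri in occupied
 *	dequeue(rq):       pri = highest set bit in occupied; pop levels[pri];
 *	                   clear bit pri if levels[pri] became empty
 *
 * The kernel version additionally maintains count, urgency, and run queue
 * statistics, and covers NRQS priority levels via a multi-word bitmap.
 */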
4132
4133 /*
4134 * realtime_setrun:
4135 *
4136 * Dispatch a thread for realtime execution.
4137 *
4138 * Thread must be locked. Associated pset must
4139 * be locked, and is returned unlocked.
4140 */
4141 static void
4142 realtime_setrun(
4143 processor_t chosen_processor,
4144 thread_t thread)
4145 {
4146 processor_set_t pset = chosen_processor->processor_set;
4147 pset_assert_locked(pset);
4148 bool pset_is_locked = true;
4149
4150 int n_backup = 0;
4151
4152 if (thread->realtime.constraint <= rt_constraint_threshold) {
4153 n_backup = sched_rt_n_backup_processors;
4154 }
4155 assert((n_backup >= 0) && (n_backup <= SCHED_MAX_BACKUP_PROCESSORS));
4156
4157 int existing_backups = bit_count(pset->pending_AST_URGENT_cpu_mask) - rt_runq_count(pset);
4158 if (existing_backups > 0) {
4159 n_backup = n_backup - existing_backups;
4160 if (n_backup < 0) {
4161 n_backup = 0;
4162 }
4163 }
4164
4165 sched_ipi_type_t ipi_type[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4166 processor_t ipi_processor[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4167
4168 thread->chosen_processor = chosen_processor;
4169
4170 /* <rdar://problem/15102234> */
4171 assert(thread->bound_processor == PROCESSOR_NULL);
4172
4173 rt_runq_insert(chosen_processor, pset, thread);
4174
4175 processor_t processor = chosen_processor;
4176
4177 int count = 0;
4178 for (int i = 0; i <= n_backup; i++) {
4179 if (i == 0) {
4180 ipi_type[i] = SCHED_IPI_NONE;
4181 ipi_processor[i] = processor;
4182 count++;
4183
4184 ast_t preempt = AST_NONE;
4185 if (thread->sched_pri > processor->current_pri) {
4186 preempt = (AST_PREEMPT | AST_URGENT);
4187 } else if (thread->sched_pri == processor->current_pri) {
4188 if (rt_deadline_add(thread->realtime.deadline, rt_deadline_epsilon) < processor->deadline) {
4189 preempt = (AST_PREEMPT | AST_URGENT);
4190 }
4191 }
4192
4193 if (preempt != AST_NONE) {
4194 if (processor->state == PROCESSOR_IDLE) {
4195 if (processor == current_processor()) {
4196 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
4197 ast_on(preempt);
4198
4199 if ((preempt & AST_URGENT) == AST_URGENT) {
4200 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4201 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4202 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 1);
4203 }
4204 }
4205
4206 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4207 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4208 }
4209 } else {
4210 ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4211 }
4212 } else if (processor->state == PROCESSOR_DISPATCHING) {
4213 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4214 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4215 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 2);
4216 }
4217 } else {
4218 if (processor == current_processor()) {
4219 ast_on(preempt);
4220
4221 if ((preempt & AST_URGENT) == AST_URGENT) {
4222 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4223 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4224 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 3);
4225 }
4226 }
4227
4228 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4229 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4230 }
4231 } else {
4232 ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4233 }
4234 }
4235 } else {
4236 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
4237 }
4238 } else {
4239 if (!pset_is_locked) {
4240 pset_lock(pset);
4241 }
4242 ipi_type[i] = SCHED_IPI_NONE;
4243 ipi_processor[i] = PROCESSOR_NULL;
4244 rt_choose_next_processor_for_followup_IPI(pset, chosen_processor, &ipi_processor[i], &ipi_type[i]);
4245 if (ipi_processor[i] == PROCESSOR_NULL) {
4246 break;
4247 }
4248 count++;
4249
4250 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
4251 ipi_processor[i]->cpu_id, ipi_processor[i]->state, backup, 1);
4252 #if CONFIG_SCHED_SMT
4253 #define p_is_good(p) (((p)->processor_primary == (p)) && ((sched_avoid_cpu0 != 1) || ((p)->cpu_id != 0)))
4254 if (n_backup == SCHED_DEFAULT_BACKUP_PROCESSORS_SMT) {
4255 processor_t p0 = ipi_processor[0];
4256 processor_t p1 = ipi_processor[1];
4257 assert(p0 && p1);
4258 if (p_is_good(p0) && p_is_good(p1)) {
4259 /*
4260 * Both the chosen processor and the first backup are non-cpu0 primaries,
4261 * so there is no need for a 2nd backup processor.
4262 */
4263 break;
4264 }
4265 }
4266 #endif /* CONFIG_SCHED_SMT */
4267 }
4268 }
4269
4270 if (pset_is_locked) {
4271 pset_unlock(pset);
4272 }
4273
4274 assert((count > 0) && (count <= (n_backup + 1)));
4275 for (int i = 0; i < count; i++) {
4276 assert(ipi_processor[i] != PROCESSOR_NULL);
4277 sched_ipi_perform(ipi_processor[i], ipi_type[i]);
4278 }
4279 }
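/*
 * Worked example with illustrative numbers: if sched_rt_n_backup_processors
 * is 2 and the thread's constraint is at or below rt_constraint_threshold,
 * up to 2 backup processors are poked in addition to the chosen one. If the
 * pset already has one more pending urgent AST than it has queued realtime
 * threads, existing_backups is 1 and only a single backup IPI is sent, so
 * the fan-out never exceeds the number of CPUs that could usefully pick the
 * thread up off the realtime run queue.
 */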
4280
4281
4282 sched_ipi_type_t
4283 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
4284 thread_t thread, __unused sched_ipi_event_t event)
4285 {
4286 #if defined(CONFIG_SCHED_DEFERRED_AST)
4287 #if CONFIG_THREAD_GROUPS
4288 if (thread) {
4289 struct thread_group *tg = thread_group_get(thread);
4290 if (thread_group_uses_immediate_ipi(tg)) {
4291 return SCHED_IPI_IMMEDIATE;
4292 }
4293 }
4294 #endif /* CONFIG_THREAD_GROUPS */
4295 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
4296 return SCHED_IPI_DEFERRED;
4297 }
4298 #else /* CONFIG_SCHED_DEFERRED_AST */
4299 (void) thread;
4300 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
4301 #endif /* CONFIG_SCHED_DEFERRED_AST */
4302 return SCHED_IPI_NONE;
4303 }
4304
4305 /* Requires the destination pset lock to be held */
4306 sched_ipi_type_t
4307 sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
4308 {
4309 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4310 assert(dst != NULL);
4311
4312 processor_set_t pset = dst->processor_set;
4313 if (current_processor() == dst) {
4314 return SCHED_IPI_NONE;
4315 }
4316
4317 bool dst_idle = (dst->state == PROCESSOR_IDLE);
4318 if (dst_idle) {
4319 pset_update_processor_state(pset, dst, PROCESSOR_DISPATCHING);
4320 }
4321
4322 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
4323 switch (ipi_type) {
4324 case SCHED_IPI_NONE:
4325 return SCHED_IPI_NONE;
4326 #if defined(CONFIG_SCHED_DEFERRED_AST)
4327 case SCHED_IPI_DEFERRED:
4328 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
4329 break;
4330 #endif /* CONFIG_SCHED_DEFERRED_AST */
4331 default:
4332 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
4333 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4334 dst->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 4);
4335 }
4336 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
4337 break;
4338 }
4339 return ipi_type;
4340 }
4341
4342 sched_ipi_type_t
4343 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
4344 {
4345 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4346 boolean_t deferred_ipi_supported = false;
4347 processor_set_t pset = dst->processor_set;
4348
4349 #if defined(CONFIG_SCHED_DEFERRED_AST)
4350 deferred_ipi_supported = true;
4351 #endif /* CONFIG_SCHED_DEFERRED_AST */
4352
4353 switch (event) {
4354 case SCHED_IPI_EVENT_SPILL:
4355 case SCHED_IPI_EVENT_SMT_REBAL:
4356 case SCHED_IPI_EVENT_REBALANCE:
4357 case SCHED_IPI_EVENT_BOUND_THR:
4358 case SCHED_IPI_EVENT_RT_PREEMPT:
4359 /*
4360 * The RT preempt, spill, SMT rebalance, rebalance and the bound thread
4361 * scenarios always use immediate IPIs.
4362 */
4363 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4364 break;
4365 case SCHED_IPI_EVENT_PREEMPT:
4366 /* In the preemption case, use immediate IPIs for RT threads */
4367 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
4368 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4369 break;
4370 }
4371
4372 /*
4373 * For non-RT thread preemption:
4374 * If the core is active, use immediate IPIs.
4375 * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI.
4376 */
4377 if (deferred_ipi_supported && dst_idle) {
4378 return sched_ipi_deferred_policy(pset, dst, thread, event);
4379 }
4380 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
4381 break;
4382 default:
4383 panic("Unrecognized scheduler IPI event type %d", event);
4384 }
4385 assert(ipi_type != SCHED_IPI_NONE);
4386 return ipi_type;
4387 }
4388
4389 void
4390 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
4391 {
4392 switch (ipi) {
4393 case SCHED_IPI_NONE:
4394 break;
4395 case SCHED_IPI_IDLE:
4396 machine_signal_idle(dst);
4397 break;
4398 case SCHED_IPI_IMMEDIATE:
4399 cause_ast_check(dst);
4400 break;
4401 case SCHED_IPI_DEFERRED:
4402 machine_signal_idle_deferred(dst);
4403 break;
4404 default:
4405 panic("Unrecognized scheduler IPI type: %d", ipi);
4406 }
4407 }
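/*
 * The IPI helpers are intended to be used in two phases, as the callers in
 * this file do: decide on the IPI type while the destination pset is locked,
 * then actually send it after dropping the lock so the target CPU can take
 * the pset lock in its AST or idle path. A minimal sketch (assuming a locked
 * pset and an already-chosen destination processor):
 *
 *	sched_ipi_type_t ipi = sched_ipi_action(dst, thread,
 *	    SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(dst, ipi);
 */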
4408
4409 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4410
4411 boolean_t
4412 priority_is_urgent(int priority)
4413 {
4414 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
4415 }
4416
4417 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
4418
4419 /*
4420 * processor_setrun:
4421 *
4422 * Dispatch a thread for execution on a
4423 * processor.
4424 *
4425 * Thread must be locked. Associated pset must
4426 * be locked, and is returned unlocked.
4427 */
4428 static void
4429 processor_setrun(
4430 processor_t processor,
4431 thread_t thread,
4432 sched_options_t options)
4433 {
4434 processor_set_t pset = processor->processor_set;
4435 pset_assert_locked(pset);
4436 ast_t preempt = AST_NONE;
4437 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
4438
4439 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4440
4441 thread->chosen_processor = processor;
4442
4443 /*
4444 * Set preemption mode.
4445 */
4446 #if defined(CONFIG_SCHED_DEFERRED_AST)
4447 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
4448 #endif
4449 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
4450 preempt = (AST_PREEMPT | AST_URGENT);
4451 } else if (processor->current_is_eagerpreempt) {
4452 preempt = (AST_PREEMPT | AST_URGENT);
4453 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
4454 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
4455 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
4456 } else {
4457 preempt = AST_NONE;
4458 }
4459 } else {
4460 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
4461 }
4462
4463 if ((options & SCHED_STIR_POT) ||
4464 ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE))) {
4465 /*
4466 * Having gone to the trouble of forcing this thread off a less preferred core,
4467 * we should force the preferable core to reschedule immediately to give this
4468 * thread a chance to run instead of just sitting on the run queue where
4469 * it may just be stolen back by the idle core we just forced it off.
4470 */
4471 preempt |= AST_PREEMPT;
4472 }
4473
4474 SCHED(processor_enqueue)(processor, thread, options);
4475 sched_update_pset_load_average(pset, 0);
4476
4477 if (preempt != AST_NONE) {
4478 if (processor->state == PROCESSOR_IDLE) {
4479 ipi_action = eExitIdle;
4480 } else if (processor->state == PROCESSOR_DISPATCHING) {
4481 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4482 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4483 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 5);
4484 }
4485 } else if (processor->state == PROCESSOR_RUNNING &&
4486 (thread->sched_pri >= processor->current_pri)) {
4487 ipi_action = eInterruptRunning;
4488 }
4489 } else {
4490 /*
4491 * New thread is not important enough to preempt what is running, but
4492 * special processor states may need special handling
4493 */
4494 if (processor->state == PROCESSOR_IDLE) {
4495 ipi_action = eExitIdle;
4496 } else if (processor->state == PROCESSOR_DISPATCHING) {
4497 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4498 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4499 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 6);
4500 }
4501 }
4502 }
4503
4504 if (ipi_action != eDoNothing) {
4505 if (processor == current_processor()) {
4506 if (ipi_action == eExitIdle) {
4507 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
4508 }
4509 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
4510 ast_on(preempt);
4511 }
4512
4513 if ((preempt & AST_URGENT) == AST_URGENT) {
4514 if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4515 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4516 processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 7);
4517 }
4518 } else {
4519 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4520 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 7);
4521 }
4522 }
4523
4524 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4525 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4526 } else {
4527 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4528 }
4529 } else {
4530 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
4531 ipi_type = sched_ipi_action(processor, thread, event);
4532 }
4533 }
4534
4535 pset_unlock(pset);
4536 sched_ipi_perform(processor, ipi_type);
4537
4538 if (ipi_action != eDoNothing && processor == current_processor()) {
4539 ast_t new_preempt = update_pending_nonurgent_preemption(processor, preempt);
4540 ast_on(new_preempt);
4541 }
4542 }
4543
4544 /*
4545 * choose_next_pset:
4546 *
4547 * Return the next sibling pset containing
4548 * available processors.
4549 *
4550 * Returns the original pset if none other is
4551 * suitable.
4552 */
4553 static processor_set_t
4554 choose_next_pset(
4555 processor_set_t pset)
4556 {
4557 processor_set_t nset = pset;
4558
4559 do {
4560 nset = next_pset(nset);
4561
4562 /*
4563 * Sometimes during startup the pset_map can contain a bit
4564 * for a pset that isn't fully published in pset_array because
4565 * the pset_map read isn't an acquire load.
4566 *
4567 * In order to avoid needing an acquire barrier here, just bail
4568 * out.
4569 */
4570 if (nset == PROCESSOR_SET_NULL) {
4571 return pset;
4572 }
4573 } while (nset->online_processor_count < 1 && nset != pset);
4574
4575 return nset;
4576 }
4577
4578 #if CONFIG_SCHED_SMT
4579 /*
4580 * choose_processor_smt:
4581 *
4582 * SMT-aware implementation of choose_processor.
4583 */
4584 processor_t
4585 choose_processor_smt(
4586 processor_set_t starting_pset,
4587 processor_t processor,
4588 thread_t thread,
4589 __unused sched_options_t *options)
4590 {
4591 processor_set_t pset = starting_pset;
4592 processor_set_t nset;
4593
4594 assert(thread->sched_pri <= MAXPRI);
4595
4596 /*
4597 * Prefer the hinted processor, when appropriate.
4598 */
4599
4600 /* Fold last processor hint from secondary processor to its primary */
4601 if (processor != PROCESSOR_NULL) {
4602 processor = processor->processor_primary;
4603 }
4604
4605 /*
4606 * Only consult platform layer if pset is active, which
4607 * it may not be in some cases when a multi-set system
4608 * is going to sleep.
4609 */
4610 if (pset->online_processor_count) {
4611 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
4612 processor_t mc_processor = machine_choose_processor(pset, processor);
4613 if (mc_processor != PROCESSOR_NULL) {
4614 processor = mc_processor->processor_primary;
4615 }
4616 }
4617 }
4618
4619 /*
4620 * At this point, we may have a processor hint, and we may have
4621 * an initial starting pset. If the hint is not in the pset, or
4622 * if the hint is for a processor in an invalid state, discard
4623 * the hint.
4624 */
4625 if (processor != PROCESSOR_NULL) {
4626 if (processor->processor_set != pset) {
4627 processor = PROCESSOR_NULL;
4628 } else if (!processor->is_recommended) {
4629 processor = PROCESSOR_NULL;
4630 } else {
4631 switch (processor->state) {
4632 case PROCESSOR_START:
4633 case PROCESSOR_PENDING_OFFLINE:
4634 case PROCESSOR_OFF_LINE:
4635 /*
4636 * Hint is for a processor that cannot support running new threads.
4637 */
4638 processor = PROCESSOR_NULL;
4639 break;
4640 case PROCESSOR_IDLE:
4641 /*
4642 * Hint is for an idle processor. Assume it is no worse than any other
4643 * idle processor. The platform layer had an opportunity to provide
4644 * the "least cost idle" processor above.
4645 */
4646 if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
4647 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
4648 uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
4649 /*
4650 * If the rotation bitmask to force a migration is set for this core and there's an idle core that
4651 * needn't be avoided, don't continue running on the same core.
4652 */
4653 if (!(bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id) && non_avoided_idle_primary_map != 0)) {
4654 return processor;
4655 }
4656 }
4657 processor = PROCESSOR_NULL;
4658 break;
4659 case PROCESSOR_RUNNING:
4660 case PROCESSOR_DISPATCHING:
4661 /*
4662 * Hint is for an active CPU. This fast-path allows
4663 * realtime threads to preempt non-realtime threads
4664 * to regain their previous executing processor.
4665 */
4666 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4667 if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
4668 return processor;
4669 }
4670 processor = PROCESSOR_NULL;
4671 }
4672
4673 /* Otherwise, use hint as part of search below */
4674 break;
4675 default:
4676 processor = PROCESSOR_NULL;
4677 break;
4678 }
4679 }
4680 }
4681
4682 /*
4683 * Iterate through the processor sets to locate
4684 * an appropriate processor. Seed results with
4685 * a last-processor hint, if available, so that
4686 * a search must find something strictly better
4687 * to replace it.
4688 *
4689 * A primary/secondary pair of SMT processors is
4690 * "unpaired" if the primary is busy but its
4691 * corresponding secondary is idle (so the physical
4692 * core has full use of its resources).
4693 */
4694
4695 assert(pset == starting_pset);
4696 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4697 return SCHED(rt_choose_processor)(pset, processor, thread);
4698 }
4699
4700 /* No realtime threads from this point on */
4701 assert(thread->sched_pri < BASEPRI_RTQUEUES);
4702
4703 integer_t lowest_priority = MAXPRI + 1;
4704 integer_t lowest_secondary_priority = MAXPRI + 1;
4705 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
4706 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
4707 integer_t lowest_count = INT_MAX;
4708 processor_t lp_processor = PROCESSOR_NULL;
4709 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
4710 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
4711 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
4712 processor_t lc_processor = PROCESSOR_NULL;
4713
4714 if (processor != PROCESSOR_NULL) {
4715 /* All other states should be enumerated above. */
4716 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
4717 assert(thread->sched_pri < BASEPRI_RTQUEUES);
4718
4719 lowest_priority = processor->current_pri;
4720 lp_processor = processor;
4721
4722 lowest_count = SCHED(processor_runq_count)(processor);
4723 lc_processor = processor;
4724 }
4725
4726 do {
4727 /*
4728 * Choose an idle processor, in pset traversal order
4729 */
4730 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
4731 uint64_t preferred_idle_primary_map = idle_primary_map & pset->perfcontrol_cpu_preferred_bitmask;
4732
4733 /* there shouldn't be a pending AST if the processor is idle */
4734 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4735
4736 /*
4737 * Look at the preferred cores first.
4738 */
4739 int cpuid = lsb_next(preferred_idle_primary_map, pset->cpu_preferred_last_chosen);
4740 if (cpuid < 0) {
4741 cpuid = lsb_first(preferred_idle_primary_map);
4742 }
4743 if (cpuid >= 0) {
4744 processor = processor_array[cpuid];
4745 pset->cpu_preferred_last_chosen = cpuid;
4746 return processor;
4747 }
4748
4749 /*
4750 * Look at the cores that don't need to be avoided next.
4751 */
4752 if (pset->perfcontrol_cpu_migration_bitmask != 0) {
4753 uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
4754 cpuid = lsb_next(non_avoided_idle_primary_map, pset->cpu_preferred_last_chosen);
4755 if (cpuid < 0) {
4756 cpuid = lsb_first(non_avoided_idle_primary_map);
4757 }
4758 if (cpuid >= 0) {
4759 processor = processor_array[cpuid];
4760 pset->cpu_preferred_last_chosen = cpuid;
4761 return processor;
4762 }
4763 }
4764
4765 /*
4766 * Fall back to any remaining idle cores if none of the preferred ones and non-avoided ones are available.
4767 */
4768 cpuid = lsb_first(idle_primary_map);
4769 if (cpuid >= 0) {
4770 processor = processor_array[cpuid];
4771 return processor;
4772 }
4773
4774 /*
4775 * Otherwise, enumerate active and idle processors to find primary candidates
4776 * with lower priority/etc.
4777 */
4778
4779 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4780 pset->recommended_bitmask &
4781 ~pset->pending_AST_URGENT_cpu_mask);
4782
4783 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
4784 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
4785 }
4786
4787 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
4788 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
4789 cpuid = ((rotid + pset->last_chosen + 1) & 63);
4790 processor = processor_array[cpuid];
4791
4792 integer_t cpri = processor->current_pri;
4793 processor_t primary = processor->processor_primary;
4794 if (primary != processor) {
4795 /* If primary is running a NO_SMT thread, don't choose its secondary */
4796 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
4797 if (cpri < lowest_secondary_priority) {
4798 lowest_secondary_priority = cpri;
4799 lp_paired_secondary_processor = processor;
4800 }
4801 }
4802 } else {
4803 if (cpri < lowest_priority) {
4804 lowest_priority = cpri;
4805 lp_processor = processor;
4806 }
4807 }
4808
4809 integer_t ccount = SCHED(processor_runq_count)(processor);
4810 if (ccount < lowest_count) {
4811 lowest_count = ccount;
4812 lc_processor = processor;
4813 }
4814 }
4815
4816 /*
4817 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
4818 * the idle primary would have short-circuited the loop above.
4819 */
4820 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4821 ~pset->primary_map &
4822 pset->recommended_bitmask);
4823
4824 /* there shouldn't be a pending AST if the processor is idle */
4825 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4826 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
4827
4828 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
4829 processor = processor_array[cpuid];
4830
4831 processor_t cprimary = processor->processor_primary;
4832
4833 integer_t primary_pri = cprimary->current_pri;
4834
4835 /*
4836 * TODO: This should also make the same decisions
4837 * as secondary_can_run_realtime_thread
4838 *
4839 * TODO: Keep track of the pending preemption priority
4840 * of the primary to make this more accurate.
4841 */
4842
4843 /* If the primary is running a no-smt thread, then don't choose its secondary */
4844 if (cprimary->state == PROCESSOR_RUNNING &&
4845 processor_active_thread_no_smt(cprimary)) {
4846 continue;
4847 }
4848
4849 /*
4850 * Find the idle secondary processor with the lowest priority primary
4851 *
4852 * We will choose this processor as a fallback if we find no better
4853 * primary to preempt.
4854 */
4855 if (primary_pri < lowest_idle_secondary_priority) {
4856 lp_idle_secondary_processor = processor;
4857 lowest_idle_secondary_priority = primary_pri;
4858 }
4859
4860 /* Find the lowest priority active primary with an idle secondary */
4861 if (primary_pri < lowest_unpaired_primary_priority) {
4862 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4863 if (cprimary->state != PROCESSOR_RUNNING &&
4864 cprimary->state != PROCESSOR_DISPATCHING) {
4865 continue;
4866 }
4867
4868 if (!cprimary->is_recommended) {
4869 continue;
4870 }
4871
4872 /* if the primary is pending preemption, don't try to re-preempt it */
4873 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
4874 continue;
4875 }
4876
4877 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
4878 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
4879 continue;
4880 }
4881
4882 lowest_unpaired_primary_priority = primary_pri;
4883 lp_unpaired_primary_processor = cprimary;
4884 }
4885 }
4886
4887 /*
4888 * We prefer preempting a primary processor over waking up its secondary.
4889 * The secondary will then be woken up by the preempted thread.
4890 */
4891 if (thread->sched_pri > lowest_unpaired_primary_priority) {
4892 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
4893 return lp_unpaired_primary_processor;
4894 }
4895
4896 /*
4897 * We prefer preempting a lower priority active processor over directly
4898 * waking up an idle secondary.
4899 * The preempted thread will then find the idle secondary.
4900 */
4901 if (thread->sched_pri > lowest_priority) {
4902 pset->last_chosen = lp_processor->cpu_id;
4903 return lp_processor;
4904 }
4905
4906 /*
4907 * lc_processor is used to indicate the best processor set run queue
4908 * on which to enqueue a thread when all available CPUs are busy with
4909 * higher priority threads, so try to make sure it is initialized.
4910 */
4911 if (lc_processor == PROCESSOR_NULL) {
4912 cpumap_t available_map = pset_available_cpumap(pset);
4913 cpuid = lsb_first(available_map);
4914 if (cpuid >= 0) {
4915 lc_processor = processor_array[cpuid];
4916 lowest_count = SCHED(processor_runq_count)(lc_processor);
4917 }
4918 }
4919
4920 /*
4921 * Move onto the next processor set.
4922 *
4923 * If all primary processors in this pset are running a higher
4924 * priority thread, move on to next pset. Only when we have
4925 * exhausted the search for primary processors do we
4926 * fall back to secondaries.
4927 */
4928 #if CONFIG_SCHED_EDGE
4929 /*
4930 * The edge scheduler expects a CPU to be selected from the pset it passed in
4931 * as the starting pset for non-RT workloads. The edge migration algorithm
4932 * should already have considered idle CPUs and loads to decide the starting_pset,
4933 * which means that this loop can be short-circuited.
4934 */
4935 nset = starting_pset;
4936 #else /* CONFIG_SCHED_EDGE */
4937 nset = next_pset(pset);
4938 #endif /* CONFIG_SCHED_EDGE */
4939
4940 if (nset != starting_pset) {
4941 pset = change_locked_pset(pset, nset);
4942 }
4943 } while (nset != starting_pset);
4944
4945 /*
4946 * Make sure that we pick a running processor,
4947 * and that the correct processor set is locked.
4948 * Since we may have unlocked the candidate processor's
4949 * pset, it may have changed state.
4950 *
4951 * All primary processors are running a higher priority
4952 * thread, so the only options left are enqueuing on
4953 * the secondary processor that would perturb the least priority
4954 * primary, or the least busy primary.
4955 */
4956
4957 /* lowest_priority is evaluated in the main loops above */
4958 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
4959 processor = lp_idle_secondary_processor;
4960 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
4961 processor = lp_paired_secondary_processor;
4962 } else if (lc_processor != PROCESSOR_NULL) {
4963 processor = lc_processor;
4964 } else {
4965 processor = PROCESSOR_NULL;
4966 }
4967
4968 if (processor) {
4969 pset = change_locked_pset(pset, processor->processor_set);
4970 /* Check that chosen processor is still usable */
4971 cpumap_t available_map = pset_available_cpumap(pset);
4972 if (bit_test(available_map, processor->cpu_id)) {
4973 pset->last_chosen = processor->cpu_id;
4974 return processor;
4975 }
4976
4977 /* processor is no longer usable */
4978 processor = PROCESSOR_NULL;
4979 }
4980
4981 pset_assert_locked(pset);
4982 pset_unlock(pset);
4983 return PROCESSOR_NULL;
4984 }
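/*
 * Rough preference order of the SMT-aware search above (illustrative
 * summary; the code is authoritative about the exact conditions):
 *
 *	1. an idle, recommended, "preferred" primary in the current pset
 *	2. an idle primary outside the migration/avoid bitmask
 *	3. any remaining idle primary
 *	4. preempting the lowest-priority primary whose secondary is idle
 *	5. preempting the lowest-priority running primary
 *	6. once every pset has been searched: an idle secondary, then a paired
 *	   secondary, then the least-loaded processor's run queue
 */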
4985 #else /* !CONFIG_SCHED_SMT */
4986 /*
4987 * choose_processor:
4988 *
4989 * Choose a processor for the thread, beginning at
4990 * the pset. Accepts an optional processor hint in
4991 * the pset.
4992 *
4993 * Returns a processor, possibly from a different pset.
4994 *
4995 * The thread must be locked. The pset must be locked,
4996 * and the resulting pset is locked on return.
4997 */
4998 processor_t
4999 choose_processor(
5000 processor_set_t starting_pset,
5001 processor_t processor,
5002 thread_t thread,
5003 __unused sched_options_t *options)
5004 {
5005 processor_set_t pset = starting_pset;
5006 processor_set_t nset;
5007
5008 assert3u(thread->sched_pri, <=, MAXPRI);
5009
5010 /*
5011 * At this point, we may have a processor hint, and we may have
5012 * an initial starting pset. If the hint is not in the pset, or
5013 * if the hint is for a processor in an invalid state, discard
5014 * the hint.
5015 */
5016 if (processor != PROCESSOR_NULL) {
5017 if (processor->processor_set != pset) {
5018 processor = PROCESSOR_NULL;
5019 } else if (!processor->is_recommended) {
5020 processor = PROCESSOR_NULL;
5021 } else {
5022 switch (processor->state) {
5023 case PROCESSOR_START:
5024 case PROCESSOR_PENDING_OFFLINE:
5025 case PROCESSOR_OFF_LINE:
5026 /*
5027 * Hint is for a processor that cannot support running new threads.
5028 */
5029 processor = PROCESSOR_NULL;
5030 break;
5031 case PROCESSOR_IDLE:
5032 /*
5033 * Hint is for an idle processor. Assume it is no worse than any other
5034 * idle processor. The platform layer had an opportunity to provide
5035 * the "least cost idle" processor above.
5036 */
5037 if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5038 uint64_t idle_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->recommended_bitmask);
5039 uint64_t non_avoided_idle_map = idle_map & ~pset->perfcontrol_cpu_migration_bitmask;
5040 /*
5041 * If the rotation bitmask to force a migration is set for this core and there's an idle core that
5042 * needn't be avoided, don't continue running on the same core.
5043 */
5044 if (!(bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id) && non_avoided_idle_map != 0)) {
5045 return processor;
5046 }
5047 }
5048 processor = PROCESSOR_NULL;
5049 break;
5050 case PROCESSOR_RUNNING:
5051 case PROCESSOR_DISPATCHING:
5052 /*
5053 * Hint is for an active CPU. This fast-path allows
5054 * realtime threads to preempt non-realtime threads
5055 * to regain their previous executing processor.
5056 */
5057 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5058 if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5059 return processor;
5060 }
5061 processor = PROCESSOR_NULL;
5062 }
5063
5064 /* Otherwise, use hint as part of search below */
5065 break;
5066 default:
5067 processor = PROCESSOR_NULL;
5068 break;
5069 }
5070 }
5071 }
5072
5073 /*
5074 * Iterate through the processor sets to locate
5075 * an appropriate processor. Seed results with
5076 * a last-processor hint, if available, so that
5077 * a search must find something strictly better
5078 * to replace it.
5079 */
5080
5081 assert(pset == starting_pset);
5082 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5083 return SCHED(rt_choose_processor)(pset, processor, thread);
5084 }
5085
5086 /* No realtime threads from this point on */
5087 assert(thread->sched_pri < BASEPRI_RTQUEUES);
5088
5089 integer_t lowest_priority = MAXPRI + 1;
5090 integer_t lowest_count = INT_MAX;
5091 processor_t lp_processor = PROCESSOR_NULL;
5092 processor_t lc_processor = PROCESSOR_NULL;
5093
5094 if (processor != PROCESSOR_NULL) {
5095 /* All other states should be enumerated above. */
5096 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
5097 assert(thread->sched_pri < BASEPRI_RTQUEUES);
5098
5099 lowest_priority = processor->current_pri;
5100 lp_processor = processor;
5101
5102 lowest_count = SCHED(processor_runq_count)(processor);
5103 lc_processor = processor;
5104 }
5105
5106
5107 do {
5108 /*
5109 * Choose an idle processor, in pset traversal order
5110 */
5111 uint64_t idle_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->recommended_bitmask);
5112 uint64_t preferred_idle_map = idle_map & pset->perfcontrol_cpu_preferred_bitmask;
5113
5114 /* there shouldn't be a pending AST if the processor is idle */
5115 assert((idle_map & pset->pending_AST_URGENT_cpu_mask) == 0);
5116
5117 /*
5118 * Look at the preferred cores first.
5119 */
5120 int cpuid = lsb_next(preferred_idle_map, pset->cpu_preferred_last_chosen);
5121 if (cpuid < 0) {
5122 cpuid = lsb_first(preferred_idle_map);
5123 }
5124 if (cpuid >= 0) {
5125 processor = processor_array[cpuid];
5126 pset->cpu_preferred_last_chosen = cpuid;
5127 return processor;
5128 }
5129
5130 /*
5131 * Look at the cores that don't need to be avoided next.
5132 */
5133 if (pset->perfcontrol_cpu_migration_bitmask != 0) {
5134 uint64_t non_avoided_idle_map = idle_map & ~pset->perfcontrol_cpu_migration_bitmask;
5135 cpuid = lsb_next(non_avoided_idle_map, pset->cpu_preferred_last_chosen);
5136 if (cpuid < 0) {
5137 cpuid = lsb_first(non_avoided_idle_map);
5138 }
5139 if (cpuid >= 0) {
5140 processor = processor_array[cpuid];
5141 pset->cpu_preferred_last_chosen = cpuid;
5142 return processor;
5143 }
5144 }
5145
5146 /*
5147 * Fall back to any remaining idle cores if none of the preferred ones and non-avoided ones are available.
5148 */
5149 cpuid = lsb_first(idle_map);
5150 if (cpuid >= 0) {
5151 processor = processor_array[cpuid];
5152 return processor;
5153 }
5154
5155 /*
5156 * Otherwise, enumerate the active processors to find candidates
5157 * running at lower priority or with shorter run queues.
5158 */
5159
5160 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
5161 pset->recommended_bitmask &
5162 ~pset->pending_AST_URGENT_cpu_mask);
5163
5164 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
5165 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
5166 }
5167
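	/*
	 * Rotate the map so the walk below starts just after the last chosen
	 * CPU (round-robin); the real cpuid is recovered by undoing the
	 * rotation modulo 64.
	 */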
5168 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
5169 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
5170 cpuid = ((rotid + pset->last_chosen + 1) & 63);
5171 processor = processor_array[cpuid];
5172
5173 integer_t cpri = processor->current_pri;
5174 if (cpri < lowest_priority) {
5175 lowest_priority = cpri;
5176 lp_processor = processor;
5177 }
5178
5179 integer_t ccount = SCHED(processor_runq_count)(processor);
5180 if (ccount < lowest_count) {
5181 lowest_count = ccount;
5182 lc_processor = processor;
5183 }
5184 }
5185
5186 /*
5187 * We prefer preempting a lower priority active processor over directly
5188 * waking up an idle secondary.
5189 * The preempted thread will then find the idle secondary.
5190 */
5191 if (thread->sched_pri > lowest_priority) {
5192 pset->last_chosen = lp_processor->cpu_id;
5193 return lp_processor;
5194 }
5195
5196 /*
5197 * lc_processor is used to indicate the best processor set run queue
5198 * on which to enqueue a thread when all available CPUs are busy with
5199 * higher priority threads, so try to make sure it is initialized.
5200 */
5201 if (lc_processor == PROCESSOR_NULL) {
5202 cpumap_t available_map = pset_available_cpumap(pset);
5203 cpuid = lsb_first(available_map);
5204 if (cpuid >= 0) {
5205 lc_processor = processor_array[cpuid];
5206 lowest_count = SCHED(processor_runq_count)(lc_processor);
5207 }
5208 }
5209
5210 /*
5211 * Move onto the next processor set.
5212 *
5213 * If all primary processors in this pset are running a higher
5214 * priority thread, move on to next pset. Only when we have
5215 * exhausted the search for primary processors do we
5216 * fall back to secondaries.
5217 */
5218 #if CONFIG_SCHED_EDGE
5219 /*
5220 * The edge scheduler expects a CPU to be selected from the pset it passed in
5221 * as the starting pset for non-RT workloads. The edge migration algorithm
5222 * should already have considered idle CPUs and loads to decide the starting_pset;
5223 * which means that this loop can be short-circuited.
5224 */
5225 nset = starting_pset;
5226 #else /* CONFIG_SCHED_EDGE */
5227 nset = next_pset(pset);
5228 #endif /* CONFIG_SCHED_EDGE */
5229
5230 if (nset != starting_pset) {
5231 pset = change_locked_pset(pset, nset);
5232 }
5233 } while (nset != starting_pset);
5234
5235 processor = lc_processor;
5236
5237 if (processor) {
5238 pset = change_locked_pset(pset, processor->processor_set);
5239 /* Check that chosen processor is still usable */
5240 cpumap_t available_map = pset_available_cpumap(pset);
5241 if (bit_test(available_map, processor->cpu_id)) {
5242 pset->last_chosen = processor->cpu_id;
5243 return processor;
5244 }
5245
5246 /* processor is no longer usable */
5247 processor = PROCESSOR_NULL;
5248 }
5249
5250 pset_assert_locked(pset);
5251 pset_unlock(pset);
5252 return PROCESSOR_NULL;
5253 }
5254 #endif /* !CONFIG_SCHED_SMT */
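/*
 * Illustrative sketch (not part of the kernel build): the rotate/scan/un-rotate
 * pattern that choose_processor() uses to walk CPU bitmaps round-robin. The
 * helper name below is hypothetical; the kernel itself uses bit_ror64(),
 * lsb_first() and lsb_next() on the pset bitmaps, but the arithmetic is the same.
 *
 *	static int
 *	choose_next_cpu_round_robin(uint64_t candidate_map, int last_chosen)
 *	{
 *		if (candidate_map == 0) {
 *			return -1;
 *		}
 *		// Rotate right so that bit (last_chosen + 1) lands at bit 0; the
 *		// lowest set bit of the rotated map is then the first candidate at
 *		// or after last_chosen + 1, wrapping around the top of the map.
 *		int shift = (last_chosen + 1) & 63;
 *		uint64_t rotated = (candidate_map >> shift) |
 *		    (candidate_map << ((64 - shift) & 63));
 *		int rotid = __builtin_ctzll(rotated);
 *		// Undo the rotation to recover the real CPU id.
 *		return (rotid + shift) & 63;
 *	}
 */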
5255
5256
5257
5258 /*
5259 * Default implementation of SCHED(choose_node)()
5260 * for single node systems
5261 */
5262 pset_node_t
5263 sched_choose_node(__unused thread_t thread)
5264 {
5265 return &pset_node0;
5266 }
5267
5268 /*
5269 * choose_starting_pset:
5270 *
5271 * Choose a starting processor set for the thread.
5272 * May return a processor hint within the pset.
5273 *
5274 * Returns a starting processor set, to be used by
5275 * choose_processor.
5276 *
5277 * The thread must be locked. The resulting pset is unlocked on return,
5278 * and is chosen without taking any pset locks.
5279 */
5280 processor_set_t
5281 choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
5282 {
5283 processor_set_t pset;
5284 processor_t processor = PROCESSOR_NULL;
5285
5286 if (thread->affinity_set != AFFINITY_SET_NULL) {
5287 /*
5288 * Use affinity set policy hint.
5289 */
5290 pset = thread->affinity_set->aset_pset;
5291 } else if (thread->last_processor != PROCESSOR_NULL) {
5292 /*
5293 * Simple (last processor) affinity case.
5294 */
5295 processor = thread->last_processor;
5296 pset = processor->processor_set;
5297 } else {
5298 /*
5299 * No Affinity case:
5300 *
5301 * Utilize a per-task hint to spread threads
5302 * among the available processor sets.
5303 * NRG this seems like the wrong thing to do.
5304 * See also task->pset_hint = pset in thread_setrun()
5305 */
5306 pset = get_threadtask(thread)->pset_hint;
5307 if (pset == PROCESSOR_SET_NULL) {
5308 pset = current_processor()->processor_set;
5309 }
5310
5311 pset = choose_next_pset(pset);
5312 }
5313
5314 if (!bit_test(node->pset_map, pset->pset_id)) {
5315 /* pset is not from this node so choose one that is */
5316 int id = lsb_first(node->pset_map);
5317 if (id < 0) {
5318 /* startup race, so check again under the node lock */
5319 lck_spin_lock(&pset_node_lock);
5320 if (bit_test(node->pset_map, pset->pset_id)) {
5321 id = pset->pset_id;
5322 } else {
5323 id = lsb_first(node->pset_map);
5324 }
5325 lck_spin_unlock(&pset_node_lock);
5326 }
5327 assert(id >= 0);
5328 pset = pset_array[id];
5329 }
5330
5331 if (bit_count(node->pset_map) == 1) {
5332 /* Only a single pset in this node */
5333 goto out;
5334 }
5335
5336 bool avoid_cpu0 = false;
5337
5338 #if defined(__x86_64__)
5339 if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
5340 /* Avoid the pset containing cpu0 */
5341 avoid_cpu0 = true;
5342 /* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */
5343 assert(bit_test(pset_array[0]->cpu_bitmask, 0));
5344 }
5345 #endif
5346
5347 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5348 pset_map_t rt_target_map;
5349 #if CONFIG_SCHED_SMT
5350 rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
5351 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
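			/*
			 * Rotating the candidate map right by one pushes pset 0 to the
			 * most significant bit, so lsb_first() considers every other
			 * pset before it; the chosen index is rotated back below.
			 */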
5352 if (avoid_cpu0) {
5353 rt_target_map = bit_ror64(rt_target_map, 1);
5354 }
5355 int rotid = lsb_first(rt_target_map);
5356 if (rotid >= 0) {
5357 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5358 pset = pset_array[id];
5359 goto out;
5360 }
5361 }
5362 if (!pset->is_SMT || !sched_allow_rt_smt) {
5363 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5364 goto out;
5365 }
5366 #endif /* CONFIG_SCHED_SMT*/
5367 rt_target_map = atomic_load(&node->pset_non_rt_map);
5368 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
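		/* Same rotate-by-one trick as above: deprioritize pset 0 when avoiding cpu0. */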
5369 if (avoid_cpu0) {
5370 rt_target_map = bit_ror64(rt_target_map, 1);
5371 }
5372 int rotid = lsb_first(rt_target_map);
5373 if (rotid >= 0) {
5374 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5375 pset = pset_array[id];
5376 goto out;
5377 }
5378 }
5379 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5380 } else {
5381 pset_map_t idle_map = atomic_load(&node->pset_idle_map);
5382 if (!bit_test(idle_map, pset->pset_id)) {
5383 int next_idle_pset_id = lsb_first(idle_map);
5384 if (next_idle_pset_id >= 0) {
5385 pset = pset_array[next_idle_pset_id];
5386 }
5387 }
5388 }
5389
5390 out:
5391 if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
5392 processor = PROCESSOR_NULL;
5393 }
5394 if (processor != PROCESSOR_NULL) {
5395 *processor_hint = processor;
5396 }
5397
5398 assert(pset != NULL);
5399 return pset;
5400 }
5401
5402 /*
5403 * thread_setrun:
5404 *
5405 * Dispatch thread for execution, onto an idle
5406 * processor or run queue, and signal a preemption
5407 * as appropriate.
5408 *
5409 * Thread must be locked.
5410 */
5411 void
5412 thread_setrun(
5413 thread_t thread,
5414 sched_options_t options)
5415 {
5416 processor_t processor = PROCESSOR_NULL;
5417 processor_set_t pset;
5418
5419 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
5420 thread_assert_runq_null(thread);
5421
5422 #if CONFIG_PREADOPT_TG
5423 /* We know that the thread is not in the runq by virtue of being in this
5424 * function and the thread is not self since we are running. We can safely
5425 * resolve the thread group hierarchy and modify the thread's thread group
5426 * here. */
5427 thread_resolve_and_enforce_thread_group_hierarchy_if_needed(thread);
5428 #endif
5429
5430 /*
5431 * Update priority if needed.
5432 */
5433 if (SCHED(can_update_priority)(thread)) {
5434 SCHED(update_priority)(thread);
5435 }
5436 thread->sfi_class = sfi_thread_classify(thread);
5437
5438 if (thread->bound_processor == PROCESSOR_NULL) {
5439 /*
5440 * Unbound case.
5441 *
5442 * Usually, this loop will only be executed once,
5443 * but if CLPC derecommends a processor after it has been chosen,
5444 * or if a processor is shut down after it is chosen,
5445 * choose_processor() may return NULL, so a retry
5446 * may be necessary. A single retry will usually
5447 * be enough, and we can't afford to retry too many times
5448 * because interrupts are disabled.
5449 */
5450 #define CHOOSE_PROCESSOR_MAX_RETRIES 3
5451 for (int retry = 0; retry <= CHOOSE_PROCESSOR_MAX_RETRIES; retry++) {
5452 processor_t processor_hint = PROCESSOR_NULL;
5453 pset_node_t node = SCHED(choose_node)(thread);
5454 processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
5455
5456 pset_lock(starting_pset);
5457
5458 processor = SCHED(choose_processor)(starting_pset, processor_hint, thread, &options);
5459 if (processor != PROCESSOR_NULL) {
5460 pset = processor->processor_set;
5461 pset_assert_locked(pset);
5462 break;
5463 }
5464 }
5465 /*
5466 * If choose_processor() still returns NULL,
5467 * which is very unlikely, we need a fallback.
5468 */
5469 if (processor == PROCESSOR_NULL) {
5470 bool unlock_available_cores_lock = false;
5471 if (sched_all_cpus_offline()) {
5472 /*
5473 * There are no available processors
5474 * because we're in final system shutdown.
5475 * Enqueue on the master processor and we'll
5476 * handle it when it powers back up.
5477 */
5478 processor = master_processor;
5479 } else if (support_bootcpu_shutdown) {
5480 /*
5481 * Grab the sched_available_cores_lock to select
5482 * some available processor and prevent it from
5483 * becoming offline while we enqueue the thread.
5484 *
5485 * This is very close to a lock inversion, but
5486 * places that do call thread_setrun with this
5487 * lock held know that the current cpu will be
5488 * schedulable, so we won't fall out of
5489 * choose_processor.
5490 */
5491 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
5492 unlock_available_cores_lock = true;
5493
5494 int last_resort_cpu = sched_last_resort_cpu();
5495
5496 processor = processor_array[last_resort_cpu];
5497 } else {
5498 /*
5499 * The master processor is never shut down, always safe to choose.
5500 */
5501 processor = master_processor;
5502 }
5503 pset = processor->processor_set;
5504 pset_lock(pset);
5505 assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5506 if (unlock_available_cores_lock) {
5507 simple_unlock(&sched_available_cores_lock);
5508 }
5509 }
5510 task_t task = get_threadtask(thread);
5511 if (!(task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE)) {
5512 task->pset_hint = pset; /* NRG this is done without holding the task lock */
5513 }
5514 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
5515 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
5516 assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5517 } else {
5518 /*
5519 * Bound case:
5520 *
5521 * Unconditionally dispatch on the processor.
5522 */
5523 processor = thread->bound_processor;
5524 pset = processor->processor_set;
5525 pset_lock(pset);
5526
5527 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
5528 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
5529 }
5530
5531 /*
5532 * Dispatch the thread on the chosen processor.
5533 * TODO: This should be based on sched_mode, not sched_pri
5534 */
5535 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5536 realtime_setrun(processor, thread);
5537 } else {
5538 processor_setrun(processor, thread, options);
5539 }
5540 /* pset is now unlocked */
5541 if (thread->bound_processor == PROCESSOR_NULL) {
5542 SCHED(check_spill)(pset, thread);
5543 }
5544 }
5545
5546 processor_set_t
5547 task_choose_pset(
5548 task_t task)
5549 {
5550 processor_set_t pset = task->pset_hint;
5551
5552 if (pset != PROCESSOR_SET_NULL) {
5553 pset = choose_next_pset(pset);
5554 }
5555
5556 return pset;
5557 }
5558
5559 /*
5560 * Check for a preemption point in
5561 * the current context.
5562 *
5563 * Called at splsched with thread locked.
5564 */
5565 ast_t
5566 csw_check(
5567 thread_t thread,
5568 processor_t processor,
5569 ast_t check_reason)
5570 {
5571 processor_set_t pset = processor->processor_set;
5572
5573 assert(thread == processor->active_thread);
5574
5575 pset_lock(pset);
5576
5577 processor_state_update_from_thread(processor, thread, true);
5578
5579 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
5580
5581 /* Acknowledge the IPI if we decided not to preempt */
5582
5583 if ((preempt & AST_URGENT) == 0) {
5584 if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5585 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 8);
5586 }
5587 }
5588
5589 if ((preempt & AST_PREEMPT) == 0) {
5590 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
5591 }
5592
5593 pset_unlock(pset);
5594
5595 return update_pending_nonurgent_preemption(processor, preempt);
5596 }
5597
5598 void
5599 clear_pending_nonurgent_preemption(processor_t processor)
5600 {
5601 if (!processor->pending_nonurgent_preemption) {
5602 return;
5603 }
5604
5605 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_END);
5606
5607 processor->pending_nonurgent_preemption = false;
5608 running_timer_clear(processor, RUNNING_TIMER_PREEMPT);
5609 }
5610
5611 ast_t
5612 update_pending_nonurgent_preemption(processor_t processor, ast_t reason)
5613 {
5614 if ((reason & (AST_URGENT | AST_PREEMPT)) != (AST_PREEMPT)) {
5615 clear_pending_nonurgent_preemption(processor);
5616 return reason;
5617 }
5618
5619 if (nonurgent_preemption_timer_abs == 0) {
5620 /* Preemption timer not enabled */
5621 return reason;
5622 }
5623
5624 if (current_thread()->state & TH_IDLE) {
5625 /* idle threads don't need nonurgent preemption */
5626 return reason;
5627 }
5628
5629 if (processor->pending_nonurgent_preemption) {
5630 /* Timer is already armed, no need to do it again */
5631 return reason;
5632 }
5633
5634 if (ml_did_interrupt_userspace()) {
5635 /*
5636 * We're preempting userspace here, so we don't need
5637 * to defer the preemption. Force AST_URGENT
5638 * so that we can avoid arming this timer without risking
5639 * ast_taken_user deciding to spend too long in kernel
5640 * space to handle other ASTs.
5641 */
5642
5643 return reason | AST_URGENT;
5644 }
5645
5646 /*
5647 * We've decided to do a nonurgent preemption when running in
5648 * kernelspace. We defer the preemption until reaching userspace boundary
5649 * to give a grace period for locks etc to be dropped and to reach
5650 * a clean preemption point, so that the preempting thread doesn't
5651 * always immediately hit the lock that the waking thread still holds.
5652 *
5653 * Arm a timer to enforce that the preemption executes within a bounded
5654 * time if the thread doesn't block or return to userspace quickly.
5655 */
5656
5657 processor->pending_nonurgent_preemption = true;
5658 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_START,
5659 reason);
5660
5661 uint64_t now = mach_absolute_time();
5662
5663 uint64_t deadline = now + nonurgent_preemption_timer_abs;
5664
5665 running_timer_enter(processor, RUNNING_TIMER_PREEMPT, NULL,
5666 deadline, now);
5667
5668 return reason;
5669 }
5670
5671 /*
5672 * Check for preemption at splsched with
5673 * pset locked and processor as the current
5674 * processor.
5675 */
5676 ast_t
5677 csw_check_locked(
5678 thread_t thread,
5679 processor_t processor,
5680 processor_set_t pset,
5681 ast_t check_reason)
5682 {
5683 assert(processor == current_processor());
5684 /*
5685 * If the current thread is running on a processor that is no longer recommended,
5686 * urgently preempt it, at which point thread_select() should
5687 * try to idle the processor and re-dispatch the thread to a recommended processor.
5688 */
5689 if (!processor->is_recommended) {
5690 return check_reason | AST_PREEMPT | AST_URGENT;
5691 }
5692
5693 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
5694 return check_reason | AST_PREEMPT | AST_URGENT;
5695 }
5696
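	/*
	 * A runnable realtime thread preempts urgently if it outranks the running
	 * thread, if the running thread has exhausted its first timeslice, or if
	 * the earliest waiting deadline (padded by epsilon) is earlier than the
	 * deadline currently being served; otherwise a non-urgent preemption is
	 * signalled.
	 */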
5697 if (rt_runq_count(pset) > 0) {
5698 if ((rt_runq_priority(pset) > processor->current_pri) || !processor->first_timeslice) {
5699 return check_reason | AST_PREEMPT | AST_URGENT;
5700 } else if (rt_deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < processor->deadline) {
5701 return check_reason | AST_PREEMPT | AST_URGENT;
5702 } else {
5703 return check_reason | AST_PREEMPT;
5704 }
5705 }
5706
5707 ast_t result = SCHED(processor_csw_check)(processor);
5708 if (result != AST_NONE) {
5709 return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
5710 }
5711
5712 /*
5713 * Same for avoid-processor
5714 *
5715 * TODO: Should these set AST_REBALANCE?
5716 */
5717 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, check_reason)) {
5718 return check_reason | AST_PREEMPT;
5719 }
5720
5721 #if CONFIG_SCHED_SMT
5722 /*
5723 * Even though we could continue executing on this processor, a
5724 * secondary SMT core should try to shed load to another primary core.
5725 *
5726 * TODO: Should this do the same check that thread_select does? i.e.
5727 * if no bound threads target this processor, and idle primaries exist, preempt
5728 * The case of RT threads existing is already taken care of above
5729 */
5730
5731 if (processor->current_pri < BASEPRI_RTQUEUES &&
5732 processor->processor_primary != processor) {
5733 return check_reason | AST_PREEMPT;
5734 }
5735 #endif /* CONFIG_SCHED_SMT*/
5736
5737 if (thread->state & TH_SUSP) {
5738 return check_reason | AST_PREEMPT;
5739 }
5740
5741 #if CONFIG_SCHED_SFI
5742 /*
5743 * Current thread may not need to be preempted, but maybe needs
5744 * an SFI wait?
5745 */
5746 result = sfi_thread_needs_ast(thread, NULL);
5747 if (result != AST_NONE) {
5748 return result;
5749 }
5750 #endif
5751
5752 return AST_NONE;
5753 }
5754
5755 /*
5756 * Handle preemption IPI or IPI in response to setting an AST flag
5757 * Triggered by cause_ast_check
5758 * Called at splsched
5759 */
5760 void
5761 ast_check(processor_t processor)
5762 {
5763 smr_ack_ipi();
5764
5765 if (processor->state != PROCESSOR_RUNNING) {
5766 return;
5767 }
5768
5769 SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
5770 MACH_SCHED_AST_CHECK) | DBG_FUNC_START);
5771
5772 thread_t thread = processor->active_thread;
5773
5774 assert(thread == current_thread());
5775
5776 /*
5777 * Pairs with task_restartable_ranges_synchronize
5778 */
5779 thread_lock(thread);
5780
5781 thread_reset_pcs_ack_IPI(thread);
5782
5783 /*
5784 * Propagate thread ast to processor.
5785 * (handles IPI in response to setting AST flag)
5786 */
5787 ast_propagate(thread);
5788
5789 /*
5790 * Stash the old urgency and perfctl values to find out if
5791 * csw_check updates them.
5792 */
5793 thread_urgency_t old_urgency = processor->current_urgency;
5794 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
5795
5796 ast_t preempt;
5797
5798 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
5799 ast_on(preempt);
5800 }
5801
5802 if (old_urgency != processor->current_urgency) {
5803 /*
5804 * Urgency updates happen with the thread lock held (ugh).
5805 * TODO: This doesn't notice QoS changes...
5806 */
5807 uint64_t urgency_param1, urgency_param2;
5808
5809 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
5810 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
5811 }
5812
5813 thread_unlock(thread);
5814
5815 if (old_perfctl_class != processor->current_perfctl_class) {
5816 /*
5817 * We updated the perfctl class of this thread from another core.
5818 * Let CLPC know that the currently running thread has a new
5819 * class.
5820 */
5821
5822 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
5823 mach_approximate_time(), 0, thread);
5824 }
5825
5826 SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
5827 MACH_SCHED_AST_CHECK) | DBG_FUNC_END, preempt);
5828 }
5829
5830
5831 void
5832 thread_preempt_expire(
5833 timer_call_param_t p0,
5834 __unused timer_call_param_t p1)
5835 {
5836 processor_t processor = p0;
5837
5838 assert(processor == current_processor());
5839 assert(p1 == NULL);
5840
5841 thread_t thread = current_thread();
5842
5843 /*
5844 * This is set and cleared by the current core, so we will
5845 * never see a race with running timer expiration
5846 */
5847 assert(processor->pending_nonurgent_preemption);
5848
5849 clear_pending_nonurgent_preemption(processor);
5850
5851 thread_lock(thread);
5852
5853 /*
5854 * Check again to see if it's still worth a
5855 * context switch, but this time force enable kernel preemption
5856 */
5857
5858 ast_t preempt = csw_check(thread, processor, AST_URGENT);
5859
5860 if (preempt) {
5861 ast_on(preempt);
5862 }
5863
5864 thread_unlock(thread);
5865
5866 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE), preempt);
5867 }
5868
5869 void
5870 perfcontrol_timer_expire(
5871 timer_call_param_t p0,
5872 __unused timer_call_param_t p1
5873 )
5874 {
5875 processor_t processor = p0;
5876 uint64_t now = mach_absolute_time();
5877 /* Default behavior is to cancel the timer */
5878 uint64_t timeout_ticks = EndOfAllTime;
5879 machine_perfcontrol_running_timer_expire(now, 0, processor->cpu_id, &timeout_ticks);
5880 if (timeout_ticks == EndOfAllTime) {
5881 running_timer_clear(processor, RUNNING_TIMER_PERFCONTROL);
5882 } else {
5883 uint64_t deadline = now + timeout_ticks;
5884 running_timer_setup(processor, RUNNING_TIMER_PERFCONTROL, NULL, deadline, now);
5885 }
5886 }
5887
5888 /*
5889 * set_sched_pri:
5890 *
5891 * Set the scheduled priority of the specified thread.
5892 *
5893 * This may cause the thread to change queues.
5894 *
5895 * Thread must be locked.
5896 */
5897 void
5898 set_sched_pri(
5899 thread_t thread,
5900 int16_t new_priority,
5901 set_sched_pri_options_t options)
5902 {
5903 bool is_current_thread = (thread == current_thread());
5904 bool removed_from_runq = false;
5905 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
5906
5907 int16_t old_priority = thread->sched_pri;
5908
5909 /* If we're already at this priority, no need to mess with the runqueue */
5910 if (new_priority == old_priority) {
5911 #if CONFIG_SCHED_CLUTCH
5912 /* For the first thread in the system, the priority is correct but
5913 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
5914 * scheduler relies on the bucket being set for all threads, update
5915 * its bucket here.
5916 */
5917 if (thread->th_sched_bucket == TH_BUCKET_RUN) {
5918 assert(thread == vm_pageout_scan_thread);
5919 SCHED(update_thread_bucket)(thread);
5920 }
5921 #endif /* CONFIG_SCHED_CLUTCH */
5922
5923 return;
5924 }
5925
5926 if (is_current_thread) {
5927 assert(thread->state & TH_RUN);
5928 thread_assert_runq_null(thread);
5929 } else {
5930 removed_from_runq = thread_run_queue_remove(thread);
5931 }
5932
5933 thread->sched_pri = new_priority;
5934
5935 #if CONFIG_SCHED_CLUTCH
5936 /*
5937 * Since for the clutch scheduler, the thread's bucket determines its runq
5938 * in the hierarchy, it is important to update the bucket when the thread
5939 * lock is held and the thread has been removed from the runq hierarchy.
5940 *
5941 * If the thread's bucket has changed, this will consume sched_tick_delta()
5942 * in order to account CPU time with the correct scheduling bucket.
5943 */
5944 SCHED(update_thread_bucket)(thread);
5945
5946 #endif /* CONFIG_SCHED_CLUTCH */
5947
5948 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
5949 (uintptr_t)thread_tid(thread),
5950 thread->base_pri,
5951 thread->sched_pri,
5952 thread->sched_usage,
5953 0);
5954
5955 if (removed_from_runq) {
5956 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
5957 } else if (is_current_thread) {
5958 processor_t processor = thread->last_processor;
5959 assert(processor == current_processor());
5960
5961 thread_urgency_t old_urgency = processor->current_urgency;
5962
5963 /*
5964 * When dropping in priority, check if the thread no longer belongs on core.
5965 * If a thread raises its own priority, don't aggressively rebalance it.
5966 * <rdar://problem/31699165>
5967 *
5968 * csw_check does a processor_state_update_from_thread, but
5969 * we should do our own if we're being lazy.
5970 */
5971 if (!lazy_update && new_priority < old_priority) {
5972 ast_t preempt;
5973
5974 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
5975 ast_on(preempt);
5976 }
5977 } else {
5978 processor_state_update_from_thread(processor, thread, false);
5979 }
5980
5981 /*
5982 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
5983 * class alterations from user space to occur relatively infrequently, hence
5984 * those are lazily handled. QoS classes have distinct priority bands, and QoS
5985 * inheritance is expected to involve priority changes.
5986 */
5987 if (processor->current_urgency != old_urgency) {
5988 uint64_t urgency_param1, urgency_param2;
5989
5990 thread_urgency_t new_urgency = thread_get_urgency(thread,
5991 &urgency_param1, &urgency_param2);
5992
5993 thread_tell_urgency(new_urgency, urgency_param1,
5994 urgency_param2, 0, thread);
5995 }
5996
5997 /* TODO: only call this if current_perfctl_class changed */
5998 uint64_t ctime = mach_approximate_time();
5999 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
6000 } else if (thread->state & TH_RUN) {
6001 processor_t processor = thread->last_processor;
6002
6003 if (!lazy_update &&
6004 processor != PROCESSOR_NULL &&
6005 processor != current_processor() &&
6006 processor->active_thread == thread) {
6007 cause_ast_check(processor);
6008 }
6009 }
6010 }
6011
6012 /*
6013 * thread_run_queue_remove_for_handoff
6014 *
6015 * Pull a thread or its (recursive) push target out of the runqueue
6016 * so that it is ready for thread_run()
6017 *
6018 * Called at splsched
6019 *
6020 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6021 * This may be different than the thread that was passed in.
6022 */
6023 thread_t
6024 thread_run_queue_remove_for_handoff(thread_t thread)
6025 {
6026 thread_t pulled_thread = THREAD_NULL;
6027
6028 thread_lock(thread);
6029
6030 /*
6031 * Check that the thread is not bound to a different processor,
6032 * NO_SMT flag is not set on the thread, cluster type of
6033 * processor matches with thread if the thread is pinned to a
6034 * particular cluster and that realtime is not involved.
6035 *
6036 * Next, pull it off its run queue. If it doesn't come, it's not eligible.
6037 */
6038 processor_t processor = current_processor();
6039 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6040 #if CONFIG_SCHED_SMT
6041 && (!thread_no_smt(thread))
6042 #endif /* CONFIG_SCHED_SMT */
6043 && (processor->current_pri < BASEPRI_RTQUEUES)
6044 && (thread->sched_pri < BASEPRI_RTQUEUES)
6045 #if __AMP__
6046 && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6047 processor->processor_set->pset_id == thread->th_bound_cluster_id)
6048 #endif /* __AMP__ */
6049 ) {
6050 if (thread_run_queue_remove(thread)) {
6051 pulled_thread = thread;
6052 }
6053 }
6054
6055 thread_unlock(thread);
6056
6057 return pulled_thread;
6058 }
6059
6060 /*
6061 * thread_prepare_for_handoff
6062 *
6063 * Make the thread ready for handoff.
6064 * If the thread was runnable then pull it off the runq, if the thread could
6065 * not be pulled, return NULL.
6066 *
6067 * If the thread was woken up from wait for handoff, make sure it is not bound to
6068 * different processor.
6069 *
6070 * Called at splsched
6071 *
6072 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6073 * This may be different than the thread that was passed in.
6074 */
6075 thread_t
6076 thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
6077 {
6078 thread_t pulled_thread = THREAD_NULL;
6079
6080 if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
6081 processor_t processor = current_processor();
6082 thread_lock(thread);
6083
6084 /*
6085 * Check that the thread is not bound to a different processor,
6086 * NO_SMT flag is not set on the thread and cluster type of
6087 * processor matches with thread if the thread is pinned to a
6088 * particular cluster. Call setrun instead if above conditions
6089 * are not satisfied.
6090 */
6091 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6092 #if CONFIG_SCHED_SMT
6093 && (!thread_no_smt(thread))
6094 #endif /* CONFIG_SCHED_SMT */
6095 #if __AMP__
6096 && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6097 processor->processor_set->pset_id == thread->th_bound_cluster_id)
6098 #endif /* __AMP__ */
6099 ) {
6100 pulled_thread = thread;
6101 } else {
6102 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
6103 }
6104 thread_unlock(thread);
6105 } else {
6106 pulled_thread = thread_run_queue_remove_for_handoff(thread);
6107 }
6108
6109 return pulled_thread;
6110 }
6111
6112 /*
6113 * thread_run_queue_remove:
6114 *
6115 * Remove a thread from its current run queue and
6116 * return TRUE if successful.
6117 *
6118 * Thread must be locked.
6119 *
6120 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
6121 * run queues because the caller locked the thread. Otherwise
6122 * the thread is on a run queue, but could be chosen for dispatch
6123 * and removed by another processor under a different lock, which
6124 * will set thread->runq to PROCESSOR_NULL.
6125 *
6126 * Hence the thread select path must not rely on anything that could
6127 * be changed under the thread lock after calling this function,
6128 * most importantly thread->sched_pri.
6129 */
6130 boolean_t
6131 thread_run_queue_remove(
6132 thread_t thread)
6133 {
6134 boolean_t removed = FALSE;
6135
6136 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
6137 /* Thread isn't runnable */
6138 thread_assert_runq_null(thread);
6139 return FALSE;
6140 }
6141
6142 processor_t processor = thread_get_runq(thread);
6143 if (processor == PROCESSOR_NULL) {
6144 /*
6145 * The thread is either not on the runq,
6146 * or is in the midst of being removed from the runq.
6147 *
6148 * runq is set to NULL under the pset lock, not the thread
6149 * lock, so the thread may still be in the process of being dequeued
6150 * from the runq. It will wait in invoke for the thread lock to be
6151 * dropped.
6152 */
6153
6154 return FALSE;
6155 }
6156
6157 if (thread->sched_pri < BASEPRI_RTQUEUES) {
6158 return SCHED(processor_queue_remove)(processor, thread);
6159 }
6160
6161 processor_set_t pset = processor->processor_set;
6162
6163 pset_lock(pset);
6164
6165 /*
6166 * Must re-read the thread runq after acquiring the pset lock, in
6167 * case another core swooped in before us to dequeue the thread.
6168 */
6169 if (thread_get_runq_locked(thread) != PROCESSOR_NULL) {
6170 /*
6171 * Thread is on the RT run queue and we have a lock on
6172 * that run queue.
6173 */
6174 rt_runq_remove(&pset->rt_runq, thread);
6175 pset_update_rt_stealable_state(pset);
6176
6177 removed = TRUE;
6178 }
6179
6180 pset_unlock(pset);
6181
6182 return removed;
6183 }
6184
6185 /*
6186 * Put the thread back where it goes after a thread_run_queue_remove
6187 *
6188 * Thread must have been removed under the same thread lock hold
6189 *
6190 * thread locked, at splsched
6191 */
6192 void
6193 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
6194 {
6195 thread_assert_runq_null(thread);
6196 assert(thread->state & (TH_RUN));
6197
6198 thread_setrun(thread, options);
6199 }
6200
6201 void
6202 sys_override_cpu_throttle(boolean_t enable_override)
6203 {
6204 if (enable_override) {
6205 cpu_throttle_enabled = 0;
6206 } else {
6207 cpu_throttle_enabled = 1;
6208 }
6209 }
6210
6211 thread_urgency_t
6212 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
6213 {
6214 uint64_t urgency_param1 = 0, urgency_param2 = 0;
6215 task_t task = get_threadtask_early(thread);
6216
6217 thread_urgency_t urgency;
6218
6219 if (thread == NULL || task == TASK_NULL || (thread->state & TH_IDLE)) {
6220 urgency_param1 = 0;
6221 urgency_param2 = 0;
6222
6223 urgency = THREAD_URGENCY_NONE;
6224 } else if (thread->sched_mode == TH_MODE_REALTIME) {
6225 urgency_param1 = thread->realtime.period;
6226 urgency_param2 = thread->realtime.deadline;
6227
6228 urgency = THREAD_URGENCY_REAL_TIME;
6229 } else if (cpu_throttle_enabled &&
6230 (thread->sched_pri <= MAXPRI_THROTTLE) &&
6231 (thread->base_pri <= MAXPRI_THROTTLE)) {
6232 /*
6233 * Threads that are running at low priority but are not
6234 * tagged with a specific QoS are separated out from
6235 * the "background" urgency. Performance management
6236 * subsystem can decide to either treat these threads
6237 * as normal threads or look at other signals like thermal
6238 * levels for optimal power/perf tradeoffs for a platform.
6239 */
6240 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
6241 boolean_t task_is_suppressed = (proc_get_effective_task_policy(task, TASK_POLICY_SUP_ACTIVE) == 0x1);
6242
6243 /*
6244 * Background urgency is applied when the thread priority is
6245 * MAXPRI_THROTTLE or lower, the thread is not promoted, and either
6246 * the thread has a QoS specified or its task is suppressed.
6247 */
6248 urgency_param1 = thread->sched_pri;
6249 urgency_param2 = thread->base_pri;
6250
6251 if (thread_lacks_qos && !task_is_suppressed) {
6252 urgency = THREAD_URGENCY_LOWPRI;
6253 } else {
6254 urgency = THREAD_URGENCY_BACKGROUND;
6255 }
6256 } else {
6257 /* For otherwise unclassified threads, report throughput QoS parameters */
6258 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
6259 urgency_param2 = proc_get_effective_task_policy(task, TASK_POLICY_THROUGH_QOS);
6260 urgency = THREAD_URGENCY_NORMAL;
6261 }
6262
6263 if (arg1 != NULL) {
6264 *arg1 = urgency_param1;
6265 }
6266 if (arg2 != NULL) {
6267 *arg2 = urgency_param2;
6268 }
6269
6270 return urgency;
6271 }
6272
6273 perfcontrol_class_t
6274 thread_get_perfcontrol_class(thread_t thread)
6275 {
6276 /* Special case handling */
6277 if (thread->state & TH_IDLE) {
6278 return PERFCONTROL_CLASS_IDLE;
6279 }
6280
6281 if (thread->sched_mode == TH_MODE_REALTIME) {
6282 return PERFCONTROL_CLASS_REALTIME;
6283 }
6284
6285 /* perfcontrol_class based on base_pri */
6286 if (thread->base_pri <= MAXPRI_THROTTLE) {
6287 return PERFCONTROL_CLASS_BACKGROUND;
6288 } else if (thread->base_pri <= BASEPRI_UTILITY) {
6289 return PERFCONTROL_CLASS_UTILITY;
6290 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
6291 return PERFCONTROL_CLASS_NONUI;
6292 } else if (thread->base_pri <= BASEPRI_USER_INITIATED) {
6293 return PERFCONTROL_CLASS_USER_INITIATED;
6294 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
6295 return PERFCONTROL_CLASS_UI;
6296 } else {
6297 if (get_threadtask(thread) == kernel_task) {
6298 /*
6299 * Classify Above UI kernel threads as PERFCONTROL_CLASS_KERNEL.
6300 * All other lower priority kernel threads should be treated
6301 * as regular threads for performance control purposes.
6302 */
6303 return PERFCONTROL_CLASS_KERNEL;
6304 }
6305 return PERFCONTROL_CLASS_ABOVEUI;
6306 }
6307 }
6308
6309 /*
6310 * This is the processor idle loop, which just looks for other threads
6311 * to execute. Processor idle threads invoke this without supplying a
6312 * current thread to idle without an asserted wait state.
6313 *
6314 * Returns the next thread to execute if dispatched directly.
6315 */
6316
6317 #if 0
6318 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
6319 #else
6320 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
6321 #endif
6322
6323 #if (DEVELOPMENT || DEBUG)
6324 int sched_idle_delay_cpuid = -1;
6325 #endif
6326
6327 thread_t
6328 processor_idle(
6329 thread_t thread,
6330 processor_t processor)
6331 {
6332 processor_set_t pset = processor->processor_set;
6333 struct recount_snap snap = { 0 };
6334
6335 (void)splsched();
6336
6337 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6338 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
6339 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
6340
6341 SCHED_STATS_INC(idle_transitions);
6342 assert(processor->running_timers_active == false);
6343
6344 recount_snapshot(&snap);
6345 recount_processor_idle(&processor->pr_recount, &snap);
6346
6347 while (1) {
6348 /*
6349 * Ensure that updates to my processor and pset state,
6350 * made by the IPI source processor before sending the IPI,
6351 * are visible on this processor now (even though we don't
6352 * take the pset lock yet).
6353 */
6354 atomic_thread_fence(memory_order_acquire);
6355
6356 if (processor->state != PROCESSOR_IDLE) {
6357 break;
6358 }
6359 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
6360 break;
6361 }
6362 #if defined(CONFIG_SCHED_DEFERRED_AST)
6363 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
6364 break;
6365 }
6366 #endif
6367 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
6368 break;
6369 }
6370
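		/*
		 * A recommended (and, with SMT, primary) processor also leaves the
		 * idle loop for pending realtime work on the pset; otherwise it only
		 * leaves for threads bound directly to it.
		 */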
6371 if (
6372 processor->is_recommended
6373 #if CONFIG_SCHED_SMT
6374 && (processor->processor_primary == processor)
6375 #endif /* CONFIG_SCHED_SMT */
6376 ) {
6377 if (rt_runq_count(pset)) {
6378 break;
6379 }
6380 } else {
6381 if (SCHED(processor_bound_count)(processor)) {
6382 break;
6383 }
6384 }
6385
6386 IDLE_KERNEL_DEBUG_CONSTANT(
6387 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
6388
6389 machine_track_platform_idle(TRUE);
6390
6391 machine_idle();
6392 /* returns with interrupts enabled */
6393
6394 machine_track_platform_idle(FALSE);
6395
6396 #if (DEVELOPMENT || DEBUG)
6397 if (processor->cpu_id == sched_idle_delay_cpuid) {
6398 delay(500);
6399 }
6400 #endif
6401
6402 (void)splsched();
6403
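		/* Re-sync with any pset/processor state published by whoever woke us. */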
6404 atomic_thread_fence(memory_order_acquire);
6405
6406 IDLE_KERNEL_DEBUG_CONSTANT(
6407 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
6408
6409 /*
6410 * Check if we should call sched_timeshare_consider_maintenance() here.
6411 * The CPU was woken out of idle due to an interrupt and we should do the
6412 * call only if the processor is still idle. If the processor is non-idle,
6413 * the threads running on the processor would do the call as part of
6414 * context switching.
6415 */
6416 if (processor->state == PROCESSOR_IDLE) {
6417 sched_timeshare_consider_maintenance(mach_absolute_time(), true);
6418 }
6419
6420 if (!SCHED(processor_queue_empty)(processor)) {
6421 #if CONFIG_SCHED_SMT
6422 /* Secondary SMT processors respond to directed wakeups
6423 * exclusively. Some platforms induce 'spurious' SMT wakeups.
6424 */
6425 if (processor->processor_primary == processor) {
6426 break;
6427 }
6428 #else /* CONFIG_SCHED_SMT*/
6429 break;
6430 #endif /* CONFIG_SCHED_SMT*/
6431 }
6432 }
6433
6434 recount_snapshot(&snap);
6435 recount_processor_run(&processor->pr_recount, &snap);
6436 smr_cpu_join(processor, snap.rsn_time_mach);
6437
6438 ast_t reason = AST_NONE;
6439
6440 /* We're handling all scheduling AST's */
6441 ast_off(AST_SCHEDULING);
6442
6443 /*
6444 * thread_select will move the processor from dispatching to running,
6445 * or put it in idle if there's nothing to do.
6446 */
6447 thread_t cur_thread = current_thread();
6448
6449 thread_lock(cur_thread);
6450 thread_t new_thread = thread_select(cur_thread, processor, &reason);
6451 thread_unlock(cur_thread);
6452
6453 assert(processor->running_timers_active == false);
6454
6455 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6456 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
6457 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
6458
6459 return new_thread;
6460 }
6461
6462 /*
6463 * Each processor has a dedicated thread which
6464 * executes the idle loop when there is no suitable
6465 * previous context.
6466 *
6467 * This continuation is entered with interrupts disabled.
6468 */
6469 void
6470 idle_thread(__assert_only void* parameter,
6471 __unused wait_result_t result)
6472 {
6473 assert(ml_get_interrupts_enabled() == FALSE);
6474 assert(parameter == NULL);
6475
6476 processor_t processor = current_processor();
6477
6478 smr_cpu_leave(processor, processor->last_dispatch);
6479
6480 /*
6481 * Ensure that anything running in idle context triggers
6482 * preemption-disabled checks.
6483 */
6484 disable_preemption_without_measurements();
6485
6486 /*
6487 * Enable interrupts temporarily to handle any pending interrupts
6488 * or IPIs before deciding to sleep
6489 */
6490 spllo();
6491
6492 thread_t new_thread = processor_idle(THREAD_NULL, processor);
6493 /* returns with interrupts disabled */
6494
6495 enable_preemption();
6496
6497 if (new_thread != THREAD_NULL) {
6498 thread_run(processor->idle_thread,
6499 idle_thread, NULL, new_thread);
6500 /*NOTREACHED*/
6501 }
6502
6503 thread_block(idle_thread);
6504 /*NOTREACHED*/
6505 }
6506
6507 void
6508 idle_thread_create(
6509 processor_t processor,
6510 thread_continue_t continuation)
6511 {
6512 kern_return_t result;
6513 thread_t thread;
6514 spl_t s;
6515 char name[MAXTHREADNAMESIZE];
6516
6517 result = kernel_thread_create(continuation, NULL, MAXPRI_KERNEL, &thread);
6518 if (result != KERN_SUCCESS) {
6519 panic("idle_thread_create failed: %d", result);
6520 }
6521
6522 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
6523 thread_set_thread_name(thread, name);
6524
6525 s = splsched();
6526 thread_lock(thread);
6527 thread->bound_processor = processor;
6528 thread->chosen_processor = processor;
6529 processor->idle_thread = thread;
6530 thread->sched_pri = thread->base_pri = IDLEPRI;
6531 thread->state = (TH_RUN | TH_IDLE);
6532 thread->options |= TH_OPT_IDLE_THREAD;
6533 thread->last_made_runnable_time = thread->last_basepri_change_time = mach_absolute_time();
6534 thread_unlock(thread);
6535 splx(s);
6536
6537 thread_deallocate(thread);
6538 }
6539
6540 /*
6541 * sched_startup:
6542 *
6543 * Kicks off scheduler services.
6544 *
6545 * Called at splsched.
6546 */
6547 void
6548 sched_startup(void)
6549 {
6550 kern_return_t result;
6551 thread_t thread;
6552
6553 simple_lock_init(&sched_vm_group_list_lock, 0);
6554
6555 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
6556 NULL, MAXPRI_KERNEL, &thread);
6557 if (result != KERN_SUCCESS) {
6558 panic("sched_startup");
6559 }
6560
6561 thread_deallocate(thread);
6562
6563 assert_thread_magic(thread);
6564
6565 /*
6566 * Yield to the sched_init_thread once, to
6567 * initialize our own thread after being switched
6568 * back to.
6569 *
6570 * The current thread is the only other thread
6571 * active at this point.
6572 */
6573 thread_block(THREAD_CONTINUE_NULL);
6574
6575 assert_thread_magic(thread);
6576 }
6577
6578 #if __arm64__
6579 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
6580 #endif /* __arm64__ */
6581
6582
6583 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
6584
6585 static _Atomic uint64_t sched_maintenance_deadline;
6586 /* Exclusively read/written by sched_timeshare_maintenance_continue */
6587 static uint64_t sched_tick_last_abstime;
6588
6589
6590 /*
6591 * sched_init_thread:
6592 *
6593 * Perform periodic bookkeeping functions about ten
6594 * times per second.
6595 */
6596 void
6597 sched_timeshare_maintenance_continue(void)
6598 {
6599 uint64_t sched_tick_ctime, late_time, sched_tick_delta;
6600
6601 struct sched_update_scan_context scan_context = {
6602 .earliest_bg_make_runnable_time = UINT64_MAX,
6603 .earliest_normal_make_runnable_time = UINT64_MAX,
6604 .earliest_rt_make_runnable_time = UINT64_MAX
6605 };
6606
6607 sched_tick_ctime = mach_absolute_time();
6608
6609 if (__improbable(sched_tick_last_abstime == 0)) {
6610 sched_tick_last_abstime = sched_tick_ctime;
6611 late_time = 0;
6612 sched_tick_delta = 1;
6613 } else {
6614 late_time = sched_tick_ctime - sched_tick_last_abstime;
6615 sched_tick_delta = late_time / sched_tick_interval;
6616 /* Ensure a delta of at least 1, since the interval could be slightly
6617 * smaller than the sched_tick_interval due to dispatch
6618 * latencies.
6619 */
6620 sched_tick_delta = MAX(sched_tick_delta, 1);
6621
6622 /* In the event interrupt latencies or platform
6623 * idle events that advanced the timebase resulted
6624 * in periods where no threads were dispatched,
6625 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
6626 * iterations.
6627 */
6628 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
6629
6630 sched_tick_last_abstime = sched_tick_ctime;
6631 }
6632
6633 scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
6634 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
6635 sched_tick_delta, late_time, 0, 0, 0);
6636
6637 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
6638 * This can be greater than 1 if there were substantial intervals during
6639 * which all processors were idle, which rarely happens in practice.
6640 */
6641
6642 os_atomic_add(&sched_tick, (uint32_t)sched_tick_delta, relaxed);
6643
6644 update_vm_info();
6645
6646 /*
6647 * Compute various averages.
6648 */
6649 compute_averages(sched_tick_delta);
6650
6651 /*
6652 * Scan the run queues for threads which
6653 * may need to be updated, and find the earliest runnable thread on the runqueue
6654 * to report its latency.
6655 */
6656 SCHED(thread_update_scan)(&scan_context);
6657
6658 /* rt_runq_scan also records pset bitmasks. */
6659 SCHED(rt_runq_scan)(&scan_context);
6660
6661 uint64_t ctime = mach_absolute_time();
6662
6663 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
6664 ctime - scan_context.earliest_bg_make_runnable_time : 0;
6665
6666 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
6667 ctime - scan_context.earliest_normal_make_runnable_time : 0;
6668
6669 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
6670 ctime - scan_context.earliest_rt_make_runnable_time : 0;
6671
6672 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
6673
6674 /*
6675 * Check to see if the special sched VM group needs attention.
6676 */
6677 sched_vm_group_maintenance();
6678
6679 #if __arm64__
6680 /* Check to see if the recommended cores failsafe is active */
6681 sched_recommended_cores_maintenance();
6682 #endif /* __arm64__ */
6683
6684
6685 #if DEBUG || DEVELOPMENT
6686 #if __x86_64__
6687 #include <i386/misc_protos.h>
6688 /* Check for long-duration interrupts */
6689 mp_interrupt_watchdog();
6690 #endif /* __x86_64__ */
6691 #endif /* DEBUG || DEVELOPMENT */
6692
6693 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
6694 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
6695 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
6696
6697 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
6698 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
6699 /*NOTREACHED*/
6700 }
6701
6702 static uint64_t sched_maintenance_wakeups;
6703
6704 /*
6705 * Determine if the set of routines formerly driven by a maintenance timer
6706 * must be invoked, based on a deadline comparison. Signals the scheduler
6707 * maintenance thread on deadline expiration. Must be invoked at an interval
6708 * lower than the "sched_tick_interval", currently accomplished by
6709 * invocation via the quantum expiration timer and at context switch time.
6710 * Performance matters: this routine reuses a timestamp approximating the
6711 * current absolute time received from the caller, and should perform
6712 * no more than a comparison against the deadline in the common case.
6713 */
6714 void
6715 sched_timeshare_consider_maintenance(uint64_t ctime, bool safe_point)
6716 {
6717 uint64_t deadline = os_atomic_load(&sched_maintenance_deadline, relaxed);
6718
6719 if (__improbable(ctime >= deadline)) {
6720 if (__improbable(current_thread() == sched_maintenance_thread)) {
6721 return;
6722 }
6723 OSMemoryBarrier();
6724
6725 uint64_t ndeadline = ctime + sched_tick_interval;
6726
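		/*
		 * Only one of the racing CPUs advances the deadline and wakes the
		 * maintenance thread; the others see the cmpxchg fail and move on.
		 */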
6727 if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
6728 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
6729 sched_maintenance_wakeups++;
6730 smr_maintenance(ctime);
6731 }
6732 }
6733
6734 smr_cpu_tick(ctime, safe_point);
6735
6736 #if !CONFIG_SCHED_CLUTCH
6737 /*
6738 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
6739 * scheduler, the load is maintained at the thread group and bucket level.
6740 */
6741 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
6742
6743 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
6744 uint64_t new_deadline = 0;
6745 if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
6746 compute_sched_load();
6747 new_deadline = ctime + sched_load_compute_interval_abs;
6748 os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
6749 }
6750 }
6751 #endif /* CONFIG_SCHED_CLUTCH */
6752
6753 #if __arm64__
6754 uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
6755
6756 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
6757 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
6758 if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
6759 machine_perfcontrol_deadline_passed(perf_deadline);
6760 }
6761 }
6762 #endif /* __arm64__ */
6763 }
6764
6765 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
6766
6767 void
6768 sched_init_thread(void)
6769 {
6770 thread_block(THREAD_CONTINUE_NULL);
6771
6772 thread_t thread = current_thread();
6773
6774 thread_set_thread_name(thread, "sched_maintenance_thread");
6775
6776 sched_maintenance_thread = thread;
6777
6778 SCHED(maintenance_continuation)();
6779
6780 /*NOTREACHED*/
6781 }
6782
6783 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
6784
6785 /*
6786 * thread_update_scan / runq_scan:
6787 *
6788 * Scan the run queues to account for timesharing threads
6789 * which need to be updated.
6790 *
6791 * Scanner runs in two passes. Pass one squirrels likely
6792 * threads away in an array, pass two does the update.
6793 *
6794 * This is necessary because the run queue is locked for
6795 * the candidate scan, but the thread is locked for the update.
6796 *
6797 * Array should be sized to make forward progress, without
6798 * disabling preemption for long periods.
6799 */
6800
6801 #define THREAD_UPDATE_SIZE 128
6802
6803 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
6804 static uint32_t thread_update_count = 0;
6805
6806 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
6807 boolean_t
6808 thread_update_add_thread(thread_t thread)
6809 {
6810 if (thread_update_count == THREAD_UPDATE_SIZE) {
6811 return FALSE;
6812 }
6813
6814 thread_update_array[thread_update_count++] = thread;
6815 thread_reference(thread);
6816 return TRUE;
6817 }
6818
6819 /* Returns whether the kernel should report that a thread triggered the fail-safe. */
6820 static bool
6821 thread_should_report_failsafe(thread_t thread)
6822 {
6823 if ((thread->sched_flags & TH_SFLAG_FAILSAFE) && !(thread->sched_flags & TH_SFLAG_FAILSAFE_REPORTED)) {
6824 /* disarm the trigger for subsequent invocations */
6825 thread->sched_flags |= TH_SFLAG_FAILSAFE_REPORTED;
6826 return true;
6827 }
6828 return false;
6829 }
6830
6831 void
6832 thread_update_process_threads(void)
6833 {
6834 assert(thread_update_count <= THREAD_UPDATE_SIZE);
6835
6836 for (uint32_t i = 0; i < thread_update_count; i++) {
6837 thread_t thread = thread_update_array[i];
6838 assert_thread_magic(thread);
6839 thread_update_array[i] = THREAD_NULL;
6840
6841 spl_t s = splsched();
6842 thread_lock(thread);
6843
6844 const bool should_report_failsafe = thread_should_report_failsafe(thread);
6845 const sched_mode_t saved_mode = thread->saved_mode; // if reporting
6846
6847 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != os_atomic_load(&sched_tick, relaxed)) {
6848 SCHED(update_priority)(thread);
6849 }
6850 thread_unlock(thread);
6851 splx(s);
6852
6853 /* now that interrupts are enabled, it is safe to report fail-safe triggers */
6854 if (should_report_failsafe) {
6855 assert((saved_mode & TH_MODE_REALTIME) || (saved_mode & TH_MODE_FIXED));
6856 uint64_t th_id = thread->thread_id;
6857 char th_name[MAXTHREADNAMESIZE] = "unknown";
6858 if (thread_has_thread_name(thread)) {
6859 thread_get_thread_name(thread, th_name);
6860 }
6861 task_t task = get_threadtask(thread);
6862 assert(task != NULL);
6863 const char* t_name = task_best_name(task);
6864 pid_t t_pid = task_pid(task);
6865 const int quanta = (saved_mode & TH_MODE_REALTIME) ? max_unsafe_rt_quanta : max_unsafe_fixed_quanta;
6866 const char* mode = (saved_mode & TH_MODE_REALTIME) ? "realtime" : "fixed";
6867 os_log_error(OS_LOG_DEFAULT, "scheduler: thread %s [%llx] in "
6868 "process %s [%d] triggered fail-safe by spinning for at least %d"
6869 "us at %s priority\n",
6870 th_name,
6871 th_id,
6872 t_name,
6873 t_pid,
6874 quanta * (int) sched_get_quantum_us(),
6875 mode);
6876 }
6877
6878 thread_deallocate(thread);
6879 }
6880
6881 thread_update_count = 0;
6882 }
6883
6884 static boolean_t
6885 runq_scan_thread(
6886 thread_t thread,
6887 sched_update_scan_context_t scan_context)
6888 {
6889 assert_thread_magic(thread);
6890
6891 if (thread->sched_stamp != os_atomic_load(&sched_tick, relaxed) &&
6892 thread->sched_mode == TH_MODE_TIMESHARE) {
6893 if (thread_update_add_thread(thread) == FALSE) {
6894 return TRUE;
6895 }
6896 }
6897
6898 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
6899 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
6900 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
6901 }
6902 } else {
6903 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
6904 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
6905 }
6906 }
6907
6908 return FALSE;
6909 }
6910
6911 /*
6912 * Scan a runq for candidate threads.
6913 *
6914 * Returns TRUE if retry is needed.
6915 */
6916 boolean_t
6917 runq_scan(
6918 run_queue_t runq,
6919 sched_update_scan_context_t scan_context)
6920 {
6921 int count = runq->count;
6922 int queue_index;
6923
6924 assert(count >= 0);
6925
6926 if (count == 0) {
6927 return FALSE;
6928 }
6929
6930 for (queue_index = bitmap_first(runq->bitmap, NRQS);
6931 queue_index >= 0;
6932 queue_index = bitmap_next(runq->bitmap, queue_index)) {
6933 thread_t thread;
6934 circle_queue_t queue = &runq->queues[queue_index];
6935
6936 cqe_foreach_element(thread, queue, runq_links) {
6937 assert(count > 0);
6938 if (runq_scan_thread(thread, scan_context) == TRUE) {
6939 return TRUE;
6940 }
6941 count--;
6942 }
6943 }
6944
6945 return FALSE;
6946 }
6947
6948 #if CONFIG_SCHED_CLUTCH
6949
6950 boolean_t
6951 sched_clutch_timeshare_scan(
6952 queue_t thread_queue,
6953 uint16_t thread_count,
6954 sched_update_scan_context_t scan_context)
6955 {
6956 if (thread_count == 0) {
6957 return FALSE;
6958 }
6959
6960 thread_t thread;
6961 qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
6962 if (runq_scan_thread(thread, scan_context) == TRUE) {
6963 return TRUE;
6964 }
6965 thread_count--;
6966 }
6967
6968 assert(thread_count == 0);
6969 return FALSE;
6970 }
6971
6972
6973 #endif /* CONFIG_SCHED_CLUTCH */
6974
6975 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
6976
6977 bool
6978 thread_is_eager_preempt(thread_t thread)
6979 {
6980 return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
6981 }
6982
6983 void
6984 thread_set_eager_preempt(thread_t thread)
6985 {
6986 spl_t s = splsched();
6987 thread_lock(thread);
6988
6989 assert(!thread_is_eager_preempt(thread));
6990
6991 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
6992
6993 if (thread == current_thread()) {
6994 /* csw_check updates current_is_eagerpreempt on the processor */
6995 ast_t ast = csw_check(thread, current_processor(), AST_NONE);
6996
6997 thread_unlock(thread);
6998
6999 if (ast != AST_NONE) {
7000 thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
7001 }
7002 } else {
7003 processor_t last_processor = thread->last_processor;
7004
7005 if (last_processor != PROCESSOR_NULL &&
7006 last_processor->state == PROCESSOR_RUNNING &&
7007 last_processor->active_thread == thread) {
7008 cause_ast_check(last_processor);
7009 }
7010
7011 thread_unlock(thread);
7012 }
7013
7014 splx(s);
7015 }
7016
7017 void
7018 thread_clear_eager_preempt(thread_t thread)
7019 {
7020 spl_t s = splsched();
7021 thread_lock(thread);
7022
7023 assert(thread_is_eager_preempt(thread));
7024
7025 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
7026
7027 if (thread == current_thread()) {
7028 current_processor()->current_is_eagerpreempt = false;
7029 }
7030
7031 thread_unlock(thread);
7032 splx(s);
7033 }
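
/*
 * Illustrative usage sketch (assumption: no such caller exists in this file).
 * A subsystem that wants prompt preemption of a thread for a short window
 * would bracket that window with the pair above:
 *
 *	thread_set_eager_preempt(current_thread());
 *	// ... short window where eager preemption is desired ...
 *	thread_clear_eager_preempt(current_thread());
 *
 * The calls must be balanced: thread_set_eager_preempt() asserts the flag is
 * not already set, and thread_clear_eager_preempt() asserts that it is.
 */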
7034
7035 /*
7036 * Scheduling statistics
7037 */
7038 void
7039 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
7040 {
7041 struct sched_statistics *stats;
7042 boolean_t to_realtime = FALSE;
7043
7044 stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
7045 stats->csw_count++;
7046
7047 if (otherpri >= BASEPRI_REALTIME) {
7048 stats->rt_sched_count++;
7049 to_realtime = TRUE;
7050 }
7051
7052 if ((reasons & AST_PREEMPT) != 0) {
7053 stats->preempt_count++;
7054
7055 if (selfpri >= BASEPRI_REALTIME) {
7056 stats->preempted_rt_count++;
7057 }
7058
7059 if (to_realtime) {
7060 stats->preempted_by_rt_count++;
7061 }
7062 }
7063 }
7064
7065 void
7066 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
7067 {
7068 uint64_t timestamp = mach_absolute_time();
7069
7070 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
7071 stats->last_change_timestamp = timestamp;
7072 }
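
/*
 * Consumer-side note (a sketch under assumptions, not an existing API):
 * count_sum integrates run-queue depth over time, so the average depth over a
 * sampling interval can be recovered by differencing two snapshots. The
 * sampler and the `prev` snapshot below are hypothetical:
 *
 *	uint64_t now = mach_absolute_time();
 *	uint64_t avg_depth = (stats->count_sum - prev.count_sum) /
 *	    (now - prev.sample_time);
 *
 * For example, a queue that held 3 threads for 2 ms and 1 thread for 8 ms
 * contributes 3*2 + 1*8 = 14 thread-ms over 10 ms, an average depth of 1.4.
 */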
7073
7074 /*
7075 * For calls from assembly code
7076 */
7077 #undef thread_wakeup
7078 void
7079 thread_wakeup(
7080 event_t x);
7081
7082 void
7083 thread_wakeup(
7084 event_t x)
7085 {
7086 thread_wakeup_with_result(x, THREAD_AWAKENED);
7087 }
7088
7089 boolean_t
7090 preemption_enabled(void)
7091 {
7092 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
7093 }
7094
7095 static void
7096 sched_timer_deadline_tracking_init(void)
7097 {
7098 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
7099 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
7100 }
7101
7102 /*
7103 * Check that all CPUs are successfully powered up in places where that's expected.
7104 */
7105 static void
7106 check_all_cpus_are_done_starting(processor_start_kind_t start_kind)
7107 {
7108 /*
7109 * `processor_count` may include registered CPUs above cpus= or cpumask= limit.
7110 * Use machine_info.logical_cpu_max for the CPU IDs that matter.
7111 */
7112 for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
7113 processor_t processor = processor_array[cpu_id];
7114 processor_wait_for_start(processor, start_kind);
7115 }
7116 }
7117
7118 /*
7119 * Find some available online CPU that threads can be enqueued on
7120 *
7121 * Called with the sched_available_cores_lock held
7122 */
7123 static int
7124 sched_last_resort_cpu(void)
7125 {
7126 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
7127
7128 int last_resort_cpu = lsb_first(pcs.pcs_effective.pcs_online_cores);
7129
7130 if (last_resort_cpu == -1) {
7131 panic("no last resort cpu found!");
7132 }
7133
7134 return last_resort_cpu;
7135 }
7136
7137
7138 static void
7139 assert_no_processors_in_transition_locked()
7140 {
7141 assert(pcs.pcs_in_kernel_sleep == false);
7142
7143 /* All processors must be either running or offline */
7144 assert(pcs.pcs_managed_cores ==
7145 (processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING] |
7146 processor_offline_state_map[PROCESSOR_OFFLINE_FULLY_OFFLINE]));
7147
7148 /* All state transitions must be quiesced at this point */
7149 assert(pcs.pcs_effective.pcs_online_cores ==
7150 processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING]);
7151 }
7152
7153 static struct powered_cores_state
7154 sched_compute_requested_powered_cores()
7155 {
7156 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
7157
7158 struct powered_cores_state output = {
7159 .pcs_online_cores = pcs.pcs_managed_cores,
7160 .pcs_powerdown_recommended_cores = pcs.pcs_managed_cores,
7161 .pcs_tempdown_cores = 0,
7162 };
7163
7164 if (!pcs.pcs_init_completed) {
7165 return output;
7166 }
7167
7168 /*
7169 * If we unify this with derecommendation, note that only sleep should stop derecommendation,
7170 * not dtrace et al.
7171 */
7172 if (pcs.pcs_powerdown_suspend_count) {
7173 return output;
7174 } else {
7175 /*
7176 * Cores that power clients such as ANE require to stay online, or
7177 * that the kernel cannot offline at all.
7178 */
7179 cpumap_t system_required_powered_cores = pcs.pcs_required_online_pmgr |
7180 pcs.pcs_required_online_system;
7181
7182 cpumap_t online_cores_goal;
7183
7184 if (pcs.pcs_user_online_core_control) {
7185 /* This is our new goal state for powered cores */
7186 output.pcs_powerdown_recommended_cores = pcs.pcs_requested_online_user;
7187 online_cores_goal = pcs.pcs_requested_online_user | system_required_powered_cores;
7188 } else {
7189 /* Remove the cores CLPC wants to power down */
7190 cpumap_t clpc_wanted_powered_cores = pcs.pcs_managed_cores;
7191 clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_user;
7192 clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_system;
7193
7194 output.pcs_powerdown_recommended_cores = clpc_wanted_powered_cores;
7195 online_cores_goal = clpc_wanted_powered_cores | system_required_powered_cores;
7196
7197 /* Any cores in managed cores that are not in wanted powered become temporary */
7198 output.pcs_tempdown_cores = (pcs.pcs_managed_cores & ~clpc_wanted_powered_cores);
7199
7200 /* Future: Treat CLPC user/system separately. */
7201 }
7202
7203 if (online_cores_goal == 0) {
7204 /*
7205 * If we're somehow trying to disable all CPUs,
7206 * force online the lowest numbered CPU.
7207 */
7208 online_cores_goal = BIT(lsb_first(pcs.pcs_managed_cores));
7209 }
7210
7211 #if RHODES_CLUSTER_POWERDOWN_WORKAROUND
7212 /*
7213 * Because warm CPU boot from WFI is not currently implemented,
7214 * we cannot power down only one CPU in a cluster, so we force up
7215 * all the CPUs in the cluster if any one CPU is up in the cluster.
7216 * Once all CPUs are disabled, then the whole cluster goes down at once.
7217 */
7218
7219 cpumap_t workaround_online_cores = 0;
7220
7221 const ml_topology_info_t* topology = ml_get_topology_info();
7222 for (unsigned int i = 0; i < topology->num_clusters; i++) {
7223 ml_topology_cluster_t* cluster = &topology->clusters[i];
7224 if ((cluster->cpu_mask & online_cores_goal) != 0) {
7225 workaround_online_cores |= cluster->cpu_mask;
7226 }
7227 }
7228
7229 online_cores_goal = workaround_online_cores;
7230 #endif /* RHODES_CLUSTER_POWERDOWN_WORKAROUND */
7231
7232 output.pcs_online_cores = online_cores_goal;
7233 }
7234
7235 return output;
7236 }
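
/*
 * Worked example of the CLPC path above (illustrative values only, ignoring
 * the RHODES_CLUSTER_POWERDOWN_WORKAROUND adjustment), assuming a 6-CPU
 * system with pcs_managed_cores = 0x3f:
 *
 *	pcs_requested_online_clpc_user   = 0x3f
 *	pcs_requested_online_clpc_system = 0x0f   // CLPC wants CPUs 4,5 down
 *	system_required_powered_cores    = 0x10   // e.g. PMGR holds CPU 4 up
 *
 *	clpc_wanted_powered_cores        = 0x3f & 0x3f & 0x0f = 0x0f
 *	pcs_powerdown_recommended_cores  = 0x0f
 *	online_cores_goal                = 0x0f | 0x10 = 0x1f
 *	pcs_tempdown_cores               = 0x3f & ~0x0f = 0x30
 *
 * So CPU 5 is powered down, CPU 4 stays online only because of the PMGR
 * requirement, and both CPUs 4 and 5 are treated as temporarily down for
 * accounting purposes.
 */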
7237
7238 static bool
7239 sched_needs_update_requested_powered_cores()
7240 {
7241 if (!pcs.pcs_init_completed) {
7242 return false;
7243 }
7244
7245 struct powered_cores_state requested = sched_compute_requested_powered_cores();
7246
7247 struct powered_cores_state effective = pcs.pcs_effective;
7248
7249 if (requested.pcs_powerdown_recommended_cores != effective.pcs_powerdown_recommended_cores ||
7250 requested.pcs_online_cores != effective.pcs_online_cores ||
7251 requested.pcs_tempdown_cores != effective.pcs_tempdown_cores) {
7252 return true;
7253 } else {
7254 return false;
7255 }
7256 }
7257
7258 kern_return_t
7259 sched_processor_exit_user(processor_t processor)
7260 {
7261 assert(processor);
7262
7263 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7264 assert(preemption_enabled());
7265
7266 kern_return_t result;
7267
7268 spl_t s = splsched();
7269 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7270
7271 if (!enable_processor_exit) {
7272 /* This API is not supported on this device. */
7273 result = KERN_NOT_SUPPORTED;
7274 goto unlock;
7275 }
7276
7277 if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
7278 /* This CPU can never change state outside of sleep. */
7279 result = KERN_NOT_SUPPORTED;
7280 goto unlock;
7281 }
7282
7283 /*
7284 * Future: Instead of failing, simulate the processor
7285 * being shut down via derecommendation and decrementing active count.
7286 */
7287 if (bit_test(pcs.pcs_required_online_pmgr, processor->cpu_id)) {
7288 /* PMGR won't let us power down this CPU right now. */
7289 result = KERN_FAILURE;
7290 goto unlock;
7291 }
7292
7293 if (pcs.pcs_powerdown_suspend_count) {
7294 /* A tool that disables CPU powerdown is active. */
7295 result = KERN_FAILURE;
7296 goto unlock;
7297 }
7298
7299 if (!bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
7300 /* The CPU is already powered off by userspace. */
7301 result = KERN_NODE_DOWN;
7302 goto unlock;
7303 }
7304
7305 if ((pcs.pcs_recommended_cores & pcs.pcs_effective.pcs_online_cores) == BIT(processor->cpu_id)) {
7306 /* This is the last available core, can't shut it down. */
7307 result = KERN_RESOURCE_SHORTAGE;
7308 goto unlock;
7309 }
7310
7311 result = KERN_SUCCESS;
7312
7313 if (!pcs.pcs_user_online_core_control) {
7314 pcs.pcs_user_online_core_control = true;
7315 }
7316
7317 bit_clear(pcs.pcs_requested_online_user, processor->cpu_id);
7318
7319 if (sched_needs_update_requested_powered_cores()) {
7320 sched_update_powered_cores_drops_lock(REASON_USER, s);
7321 }
7322
7323 unlock:
7324 simple_unlock(&sched_available_cores_lock);
7325 splx(s);
7326
7327 return result;
7328 }
7329
7330 kern_return_t
7331 sched_processor_start_user(processor_t processor)
7332 {
7333 assert(processor);
7334
7335 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7336 assert(preemption_enabled());
7337
7338 kern_return_t result;
7339
7340 spl_t s = splsched();
7341 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7342
7343 if (!enable_processor_exit) {
7344 result = KERN_NOT_SUPPORTED;
7345 goto unlock;
7346 }
7347
7348 if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
7349 result = KERN_NOT_SUPPORTED;
7350 goto unlock;
7351 }
7352
7353 #if CONFIG_SCHED_SMT
7354 /* Not allowed to start an SMT processor while SMT is disabled */
7355 if ((sched_enable_smt == 0) && (processor->processor_primary != processor)) {
7356 result = KERN_FAILURE;
7357 goto unlock;
7358 }
7359 #endif /* CONFIG_SCHED_SMT */
7360
7361 if (pcs.pcs_powerdown_suspend_count) {
7362 result = KERN_FAILURE;
7363 goto unlock;
7364 }
7365
7366 if (bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
7367 result = KERN_FAILURE;
7368 goto unlock;
7369 }
7370
7371 result = KERN_SUCCESS;
7372
7373 bit_set(pcs.pcs_requested_online_user, processor->cpu_id);
7374
7375 /*
7376 * Once the user puts all CPUs back online,
7377 * we can resume automatic cluster power down.
7378 */
7379 if (pcs.pcs_requested_online_user == pcs.pcs_managed_cores) {
7380 pcs.pcs_user_online_core_control = false;
7381 }
7382
7383 if (sched_needs_update_requested_powered_cores()) {
7384 sched_update_powered_cores_drops_lock(REASON_USER, s);
7385 }
7386
7387 unlock:
7388 simple_unlock(&sched_available_cores_lock);
7389 splx(s);
7390
7391 return result;
7392 }
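
/*
 * Illustrative caller sketch (the surrounding handler and its error handling
 * are hypothetical). Both entry points assert that cluster_powerdown_lock is
 * held and that preemption is enabled, so a caller is expected to look
 * roughly like:
 *
 *	lck_mtx_lock(&cluster_powerdown_lock);
 *	kern_return_t kr = want_offline ?
 *	    sched_processor_exit_user(processor) :
 *	    sched_processor_start_user(processor);
 *	lck_mtx_unlock(&cluster_powerdown_lock);
 *
 * Taking any CPU offline via sched_processor_exit_user() switches the system
 * into pcs_user_online_core_control mode; that mode is exited automatically
 * once every managed CPU has been brought back with
 * sched_processor_start_user().
 */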
7393
7394 sched_cond_atomic_t sched_update_powered_cores_wakeup;
7395 thread_t sched_update_powered_cores_thread;
7396
7397
7398 static void OS_NORETURN sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused);
7399
7400 /*
7401 * After all processors have been ml_processor_register'ed and processor_boot'ed
7402 * the scheduler can finalize its data structures and allow CPU power state changes.
7403 *
7404 * Enforce that this only happens *once*. More than once is definitely not OK. rdar://121270513
7405 */
7406 void
7407 sched_cpu_init_completed(void)
7408 {
7409 static bool sched_cpu_init_completed_called = false;
7410
7411 if (!os_atomic_cmpxchg(&sched_cpu_init_completed_called, false, true, relaxed)) {
7412 panic("sched_cpu_init_completed called twice! %d", sched_cpu_init_completed_called);
7413 }
7414
7415 if (SCHED(cpu_init_completed) != NULL) {
7416 SCHED(cpu_init_completed)();
7417 }
7418
7419 SCHED(rt_init_completed)();
7420
7421 /* Wait for any cpu that is still starting, and enforce that they eventually complete. */
7422 check_all_cpus_are_done_starting(PROCESSOR_FIRST_BOOT);
7423
7424 lck_mtx_lock(&cluster_powerdown_lock);
7425
7426 assert(sched_update_powered_cores_thread == THREAD_NULL);
7427
7428 sched_cond_init(&sched_update_powered_cores_wakeup);
7429
7430 kern_return_t result = kernel_thread_start_priority(
7431 sched_update_powered_cores_continue,
7432 NULL, MAXPRI_KERNEL, &sched_update_powered_cores_thread);
7433 if (result != KERN_SUCCESS) {
7434 panic("failed to create sched_update_powered_cores thread");
7435 }
7436
7437 thread_set_thread_name(sched_update_powered_cores_thread,
7438 "sched_update_powered_cores");
7439
7440 spl_t s = splsched();
7441 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7442
7443 assert(pcs.pcs_init_completed == false);
7444
7445 pcs.pcs_managed_cores = pcs.pcs_effective.pcs_online_cores;
7446
7447 assert(__builtin_popcountll(pcs.pcs_managed_cores) == machine_info.logical_cpu_max);
7448
7449 /* If CLPC tries to cluster power down before this point, it's ignored. */
7450 pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
7451 pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
7452 pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
7453
7454 cpumap_t system_required_cores = 0;
7455
7456 /*
7457 * Ask the platform layer which CPUs are allowed to
7458 * be powered off outside of system sleep.
7459 */
7460 for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
7461 if (!ml_cpu_can_exit(cpu_id)) {
7462 bit_set(system_required_cores, cpu_id);
7463 }
7464 }
7465
7466 pcs.pcs_required_online_system = system_required_cores;
7467 pcs.pcs_effective.pcs_powerdown_recommended_cores = pcs.pcs_managed_cores;
7468
7469 pcs.pcs_requested = sched_compute_requested_powered_cores();
7470
7471 assert(pcs.pcs_requested.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
7472 assert(pcs.pcs_requested.pcs_online_cores == pcs.pcs_managed_cores);
7473 assert(pcs.pcs_requested.pcs_tempdown_cores == 0);
7474
7475 assert(pcs.pcs_effective.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
7476 assert(pcs.pcs_effective.pcs_online_cores == pcs.pcs_managed_cores);
7477 assert(pcs.pcs_effective.pcs_tempdown_cores == 0);
7478
7479 pcs.pcs_init_completed = true;
7480
7481 simple_unlock(&sched_available_cores_lock);
7482 splx(s);
7483
7484 lck_mtx_unlock(&cluster_powerdown_lock);
7485
7486 /* Release the +1 pcs_powerdown_suspend_count that we booted up with. */
7487 resume_cluster_powerdown();
7488 }
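
/*
 * Boot-time ordering sketch (approximate; the per-CPU bringup call sites live
 * outside this file and are named here only for orientation):
 *
 *	ml_processor_register() / processor_boot()    // per CPU, during bringup
 *	sched_cpu_init_completed()                    // exactly once, as above
 *	    -> resume_cluster_powerdown()             // drops the boot +1 suspend
 *	sched_perfcontrol_update_powered_cores(...)   // honored from here on
 *
 * Until pcs_init_completed is set at the end of this function,
 * sched_compute_requested_powered_cores() keeps every managed core powered,
 * and any earlier CLPC powerdown request is discarded when the requested
 * masks are reset above.
 */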
7489
7490 bool
7491 sched_is_in_sleep(void)
7492 {
7493 return pcs.pcs_in_kernel_sleep || pcs.pcs_wants_kernel_sleep;
7494 }
7495
7496 bool
7497 sched_is_cpu_init_completed(void)
7498 {
7499 return pcs.pcs_init_completed;
7500 }
7501
7502 processor_reason_t last_sched_update_powered_cores_continue_reason;
7503
7504 static void OS_NORETURN
7505 sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused)
7506 {
7507 sched_cond_ack(&sched_update_powered_cores_wakeup);
7508
7509 while (true) {
7510 lck_mtx_lock(&cluster_powerdown_lock);
7511
7512 spl_t s = splsched();
7513 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7514
7515 bool needs_update = sched_needs_update_requested_powered_cores();
7516
7517 if (needs_update) {
7518 /* This thread shouldn't need to make changes while powerdown is suspended */
7519 assert(pcs.pcs_powerdown_suspend_count == 0);
7520
7521 processor_reason_t reason = last_sched_update_powered_cores_continue_reason;
7522
7523 sched_update_powered_cores_drops_lock(reason, s);
7524 }
7525
7526 simple_unlock(&sched_available_cores_lock);
7527 splx(s);
7528
7529 lck_mtx_unlock(&cluster_powerdown_lock);
7530
7531 /* If we did an update, we dropped the lock, so check again. */
7532
7533 if (!needs_update) {
7534 sched_cond_wait(&sched_update_powered_cores_wakeup, THREAD_UNINT,
7535 sched_update_powered_cores_continue);
7536 /* The condition was signaled since we last blocked, check again. */
7537 }
7538 }
7539 }
7540
7541 __options_decl(sched_powered_cores_flags_t, uint32_t, {
7542 ASSERT_IN_SLEEP = 0x10000000,
7543 ASSERT_POWERDOWN_SUSPENDED = 0x20000000,
7544 POWERED_CORES_OPTIONS_MASK = ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED,
7545 });
7546
7547 /*
7548 * This is KPI with CLPC.
7549 */
7550 void
7551 sched_perfcontrol_update_powered_cores(
7552 uint64_t requested_powered_cores,
7553 processor_reason_t reason,
7554 __unused uint32_t flags)
7555 {
7556 assert((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER));
7557
7558 #if DEVELOPMENT || DEBUG
7559 if (flags & (ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED)) {
7560 if (flags & ASSERT_POWERDOWN_SUSPENDED) {
7561 assert(pcs.pcs_powerdown_suspend_count > 0);
7562 }
7563 if (flags & ASSERT_IN_SLEEP) {
7564 assert(pcs.pcs_sleep_override_recommended == true);
7565 }
7566 return;
7567 }
7568 #endif
7569
7570 spl_t s = splsched();
7571 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7572
7573 cpumap_t requested_cores = requested_powered_cores & pcs.pcs_managed_cores;
7574
7575 if (reason == REASON_CLPC_SYSTEM) {
7576 pcs.pcs_requested_online_clpc_system = requested_cores;
7577 } else if (reason == REASON_CLPC_USER) {
7578 pcs.pcs_requested_online_clpc_user = requested_cores;
7579 }
7580
7581 bool needs_update = sched_needs_update_requested_powered_cores();
7582
7583 if (needs_update) {
7584 last_sched_update_powered_cores_continue_reason = reason;
7585 }
7586
7587 simple_unlock(&sched_available_cores_lock);
7588 splx(s);
7589
7590 if (needs_update) {
7591 sched_cond_signal(&sched_update_powered_cores_wakeup,
7592 sched_update_powered_cores_thread);
7593 }
7594 }
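
/*
 * The wakeup path above and sched_update_powered_cores_continue() form a
 * sched_cond handshake. A condensed view (illustrative only):
 *
 *	// producer (any requester, e.g. CLPC or PMGR), under the spinlock:
 *	//   1. update the relevant pcs_requested_* state
 *	//   2. record the reason for the worker
 *	//   3. drop the lock, then sched_cond_signal(&wakeup, worker)
 *
 *	// worker thread:
 *	//   sched_cond_ack(&wakeup);
 *	//   loop: take cluster_powerdown_lock + sched_available_cores_lock,
 *	//         apply the change if sched_needs_update_requested_powered_cores(),
 *	//         otherwise sched_cond_wait(&wakeup, THREAD_UNINT, continuation);
 *
 * Because the worker re-evaluates the requested state under the locks, a
 * redundant or stale wakeup is harmless: it simply finds nothing to do.
 */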
7595
7596 /*
7597 * The performance controller invokes this method to reevaluate a thread
7598 * placement on the processor cpu_id when the per-core timer expires to force
7599 * a preemption if necessary.
7600 */
7601 bool
7602 sched_perfcontrol_check_oncore_thread_preemption(
7603 __unused uint64_t flags,
7604 int cpu_id __assert_only)
7605 {
7606 bool ret = false;
7607 assert(ml_get_interrupts_enabled() == false);
7608
7609 processor_t processor = current_processor();
7610 thread_t thread = current_thread();
7611 assert(processor->cpu_id == cpu_id);
7612
7613 thread_lock(thread);
7614 ast_t preempt = csw_check(thread, processor, AST_NONE);
7615 if (preempt != AST_NONE) {
7616 /*
7617 * TODO: Returning true here is best effort and isn't guaranteed to preempt the thread since thread_select can
7618 * choose to leave the thread on the same processor. Consider using the flags passed in here to callback into
7619 * CLPC before the next scheduling decision point (or sampler tick) if this decision needs to be reevaluated or
7620 * to otherwise adjust this behavior.
7621 */
7622 ret = true;
7623 ast_on(preempt);
7624 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_ONCORE_PREEMPT), thread_tid(thread), processor->cpu_id, 0, 0, 0);
7625 }
7626 thread_unlock(thread);
7627
7628 return ret;
7629 }
7630
7631 /*
7632 * This doesn't just suspend cluster powerdown.
7633 * It also powers up all the cores and leaves them up,
7634 * even if some user wanted them down.
7635 * This is important because dtrace, monotonic, and others can't handle any
7636 * powered down cores, not just cluster powerdown.
7637 */
7638 static void
7639 suspend_cluster_powerdown_locked(bool for_sleep)
7640 {
7641 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7642 kprintf("%s>calling sched_update_powered_cores to suspend powerdown\n", __func__);
7643
7644 spl_t s = splsched();
7645 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7646
7647 assert(pcs.pcs_powerdown_suspend_count >= 0);
7648
7649 if (for_sleep) {
7650 assert(!pcs.pcs_wants_kernel_sleep);
7651 assert(!pcs.pcs_in_kernel_sleep);
7652 pcs.pcs_wants_kernel_sleep = true;
7653 }
7654
7655 pcs.pcs_powerdown_suspend_count++;
7656
7657 if (sched_needs_update_requested_powered_cores()) {
7658 sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
7659 }
7660
7661 if (for_sleep) {
7662 assert(pcs.pcs_wants_kernel_sleep);
7663 assert(!pcs.pcs_in_kernel_sleep);
7664 pcs.pcs_in_kernel_sleep = true;
7665
7666 assert(sched_needs_update_requested_powered_cores() == false);
7667 }
7668
7669 simple_unlock(&sched_available_cores_lock);
7670 splx(s);
7671
7672 if (pcs.pcs_init_completed) {
7673 /* At this point, no cpu should be still starting. Let's enforce that. */
7674 check_all_cpus_are_done_starting(for_sleep ?
7675 PROCESSOR_BEFORE_ENTERING_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_SUSPEND);
7676 }
7677 }
7678
7679 static void
7680 resume_cluster_powerdown_locked(bool for_sleep)
7681 {
7682 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7683
7684 if (pcs.pcs_init_completed) {
7685 /* At this point, no cpu should be still starting. Let's enforce that. */
7686 check_all_cpus_are_done_starting(for_sleep ?
7687 PROCESSOR_WAKE_FROM_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_RESUME);
7688 }
7689
7690 kprintf("%s>calling sched_update_powered_cores to resume powerdown\n", __func__);
7691
7692 spl_t s = splsched();
7693 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7694
7695 if (pcs.pcs_powerdown_suspend_count <= 0) {
7696 panic("resume_cluster_powerdown() called with pcs.pcs_powerdown_suspend_count=%d\n", pcs.pcs_powerdown_suspend_count);
7697 }
7698
7699 if (for_sleep) {
7700 assert(pcs.pcs_wants_kernel_sleep);
7701 assert(pcs.pcs_in_kernel_sleep);
7702 pcs.pcs_wants_kernel_sleep = false;
7703 }
7704
7705 pcs.pcs_powerdown_suspend_count--;
7706
7707 if (pcs.pcs_powerdown_suspend_count == 0) {
7708 /* Returning to client controlled powerdown mode */
7709 assert(pcs.pcs_init_completed);
7710
7711 /* To match previous behavior, clear the user state */
7712 pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
7713 pcs.pcs_user_online_core_control = false;
7714
7715 /* To match previous behavior, clear the requested CLPC state. */
7716 pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
7717 pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
7718 }
7719
7720 if (sched_needs_update_requested_powered_cores()) {
7721 sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
7722 }
7723
7724 if (for_sleep) {
7725 assert(!pcs.pcs_wants_kernel_sleep);
7726 assert(pcs.pcs_in_kernel_sleep);
7727 pcs.pcs_in_kernel_sleep = false;
7728
7729 assert(sched_needs_update_requested_powered_cores() == false);
7730 }
7731
7732 simple_unlock(&sched_available_cores_lock);
7733 splx(s);
7734 }
7735
7736 static uint64_t
7737 die_and_cluster_to_cpu_mask(
7738 __unused unsigned int die_id,
7739 __unused unsigned int die_cluster_id)
7740 {
7741 #if __arm__ || __arm64__
7742 const ml_topology_info_t* topology = ml_get_topology_info();
7743 unsigned int num_clusters = topology->num_clusters;
7744 for (unsigned int i = 0; i < num_clusters; i++) {
7745 ml_topology_cluster_t* cluster = &topology->clusters[i];
7746 if ((cluster->die_id == die_id) &&
7747 (cluster->die_cluster_id == die_cluster_id)) {
7748 return cluster->cpu_mask;
7749 }
7750 }
7751 #endif
7752 return 0ull;
7753 }
7754
7755 /*
7756 * Take an assertion that ensures all CPUs in the cluster are powered up until
7757 * the assertion is released.
7758 * A system suspend will still power down the CPUs.
7759 * This call will stall if system suspend is in progress.
7760 *
7761 * Future ER: Could this just power up the cluster, and leave enabling the
7762 * processors to be asynchronous, or deferred?
7763 *
7764 * Enabling the rail is synchronous, it must be powered up before returning.
7765 */
7766 void
7767 sched_enable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
7768 {
7769 uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
7770
7771 lck_mtx_lock(&cluster_powerdown_lock);
7772
7773 /*
7774 * Note: if pcs.pcs_init_completed is false, because the
7775 * CPUs have not booted yet, then we assume that all
7776 * clusters are already powered up at boot (see IOCPUInitialize)
7777 * so we don't have to wait for cpu boot to complete.
7778 * We'll still save the requested assertion and enforce it after
7779 * boot completes.
7780 */
7781
7782 spl_t s = splsched();
7783 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7784
7785 if (pcs.pcs_init_completed) {
7786 assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
7787 }
7788
7789 /* Can't enable something that is already enabled */
7790 assert((pcs.pcs_required_online_pmgr & core_mask) == 0);
7791
7792 pcs.pcs_required_online_pmgr |= core_mask;
7793
7794 if (sched_needs_update_requested_powered_cores()) {
7795 sched_update_powered_cores_drops_lock(REASON_PMGR_SYSTEM, s);
7796 }
7797
7798 simple_unlock(&sched_available_cores_lock);
7799 splx(s);
7800
7801 lck_mtx_unlock(&cluster_powerdown_lock);
7802 }
7803
7804 /*
7805 * Release the assertion ensuring the cluster is powered up.
7806 * This operation is asynchronous, so PMGR doesn't need to wait until it takes
7807 * effect. If the enable comes in before it takes effect, it'll either
7808 * wait on the lock, or the async thread will discover it needs no update.
7809 */
7810 void
7811 sched_disable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
7812 {
7813 uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
7814
7815 spl_t s = splsched();
7816 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7817
7818 /* Can't disable something that is already disabled */
7819 assert((pcs.pcs_required_online_pmgr & core_mask) == core_mask);
7820
7821 if (pcs.pcs_init_completed) {
7822 assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
7823 }
7824
7825 pcs.pcs_required_online_pmgr &= ~core_mask;
7826
7827 bool needs_update = sched_needs_update_requested_powered_cores();
7828
7829 if (needs_update) {
7830 last_sched_update_powered_cores_continue_reason = REASON_PMGR_SYSTEM;
7831 }
7832
7833 simple_unlock(&sched_available_cores_lock);
7834 splx(s);
7835
7836 if (needs_update) {
7837 sched_cond_signal(&sched_update_powered_cores_wakeup,
7838 sched_update_powered_cores_thread);
7839 }
7840 }
7841
7842 void
7843 suspend_cluster_powerdown(void)
7844 {
7845 lck_mtx_lock(&cluster_powerdown_lock);
7846 suspend_cluster_powerdown_locked(false);
7847 lck_mtx_unlock(&cluster_powerdown_lock);
7848 }
7849
7850 void
7851 resume_cluster_powerdown(void)
7852 {
7853 lck_mtx_lock(&cluster_powerdown_lock);
7854 resume_cluster_powerdown_locked(false);
7855 lck_mtx_unlock(&cluster_powerdown_lock);
7856
7857 #if CONFIG_SCHED_SMT
7858 if (sched_enable_smt == 0) {
7859 enable_smt_processors(false);
7860 }
7861 #endif /* CONFIG_SCHED_SMT */
7862 }
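
/*
 * pcs_powerdown_suspend_count behaves as a nesting counter, so suspends and
 * resumes must be balanced. An illustrative sequence (hypothetical callers):
 *
 *	suspend_cluster_powerdown();   // count 0 -> 1, all managed cores power up
 *	suspend_cluster_powerdown();   // count 1 -> 2, no additional effect
 *	resume_cluster_powerdown();    // count 2 -> 1, powerdown still blocked
 *	resume_cluster_powerdown();    // count 1 -> 0, user/CLPC requests reset
 *	                               // and client-controlled powerdown resumes
 *
 * An unbalanced resume panics in resume_cluster_powerdown_locked().
 */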
7863
7864
7865 LCK_MTX_DECLARE(user_cluster_powerdown_lock, &cluster_powerdown_grp);
7866 static bool user_suspended_cluster_powerdown = false;
7867
7868 kern_return_t
7869 suspend_cluster_powerdown_from_user(void)
7870 {
7871 kern_return_t ret = KERN_FAILURE;
7872
7873 lck_mtx_lock(&user_cluster_powerdown_lock);
7874
7875 if (!user_suspended_cluster_powerdown) {
7876 suspend_cluster_powerdown();
7877 user_suspended_cluster_powerdown = true;
7878 ret = KERN_SUCCESS;
7879 }
7880
7881 lck_mtx_unlock(&user_cluster_powerdown_lock);
7882
7883 return ret;
7884 }
7885
7886 kern_return_t
7887 resume_cluster_powerdown_from_user(void)
7888 {
7889 kern_return_t ret = KERN_FAILURE;
7890
7891 lck_mtx_lock(&user_cluster_powerdown_lock);
7892
7893 if (user_suspended_cluster_powerdown) {
7894 resume_cluster_powerdown();
7895 user_suspended_cluster_powerdown = false;
7896 ret = KERN_SUCCESS;
7897 }
7898
7899 lck_mtx_unlock(&user_cluster_powerdown_lock);
7900
7901 return ret;
7902 }
7903
7904 int
7905 get_cluster_powerdown_user_suspended(void)
7906 {
7907 lck_mtx_lock(&user_cluster_powerdown_lock);
7908
7909 int ret = (int)user_suspended_cluster_powerdown;
7910
7911 lck_mtx_unlock(&user_cluster_powerdown_lock);
7912
7913 return ret;
7914 }
7915
7916 #if DEVELOPMENT || DEBUG
7917 /* Functions to support the temporary sysctl */
7918 static uint64_t saved_requested_powered_cores = ALL_CORES_POWERED;
7919 void
7920 sched_set_powered_cores(int requested_powered_cores)
7921 {
7922 processor_reason_t reason = bit_test(requested_powered_cores, 31) ? REASON_CLPC_USER : REASON_CLPC_SYSTEM;
7923 sched_powered_cores_flags_t flags = requested_powered_cores & POWERED_CORES_OPTIONS_MASK;
7924
7925 saved_requested_powered_cores = requested_powered_cores;
7926
7927 requested_powered_cores = bits(requested_powered_cores, 28, 0);
7928
7929 sched_perfcontrol_update_powered_cores(requested_powered_cores, reason, flags);
7930 }
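
/*
 * The sysctl argument packs several fields into one int. For example, a
 * value of 0x8000000f decodes (per the code above) as:
 *
 *	bit 31 set           -> reason = REASON_CLPC_USER
 *	bits 29:28 (flags)   -> 0, so no ASSERT_* option
 *	low bits (0x0000000f) -> request CPUs 0-3 powered
 *
 * while 0x0000000f requests the same cores with reason REASON_CLPC_SYSTEM.
 */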
7931 int
7932 sched_get_powered_cores(void)
7933 {
7934 return (int)saved_requested_powered_cores;
7935 }
7936
7937 uint64_t
7938 sched_sysctl_get_recommended_cores(void)
7939 {
7940 return pcs.pcs_recommended_cores;
7941 }
7942 #endif
7943
7944 /*
7945 * Ensure that all cores are powered and recommended before sleep
7946 * Acquires cluster_powerdown_lock and returns with it held.
7947 */
7948 void
7949 sched_override_available_cores_for_sleep(void)
7950 {
7951 if (!pcs.pcs_init_completed) {
7952 panic("Attempting to sleep before all CPUS are registered");
7953 }
7954
7955 lck_mtx_lock(&cluster_powerdown_lock);
7956
7957 spl_t s = splsched();
7958 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7959
7960 assert(pcs.pcs_sleep_override_recommended == false);
7961
7962 pcs.pcs_sleep_override_recommended = true;
7963 sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
7964
7965 simple_unlock(&sched_available_cores_lock);
7966 splx(s);
7967
7968 suspend_cluster_powerdown_locked(true);
7969 }
7970
7971 /*
7972 * Restore the previously recommended cores, but leave all cores powered
7973 * after sleep.
7974 * Called with cluster_powerdown_lock still held, releases the lock.
7975 */
7976 void
7977 sched_restore_available_cores_after_sleep(void)
7978 {
7979 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7980
7981 spl_t s = splsched();
7982 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7983 assert(pcs.pcs_sleep_override_recommended == true);
7984
7985 pcs.pcs_sleep_override_recommended = false;
7986 sched_update_recommended_cores_locked(REASON_NONE, 0);
7987
7988 simple_unlock(&sched_available_cores_lock);
7989 splx(s);
7990
7991 resume_cluster_powerdown_locked(true);
7992
7993 lck_mtx_unlock(&cluster_powerdown_lock);
7994
7995 #if CONFIG_SCHED_SMT
7996 if (sched_enable_smt == 0) {
7997 enable_smt_processors(false);
7998 }
7999 #endif /* CONFIG_SCHED_SMT */
8000 }
8001
8002 #if __arm__ || __arm64__
8003
8004 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
8005 uint64_t perfcontrol_failsafe_activation_time;
8006 uint64_t perfcontrol_failsafe_deactivation_time;
8007
8008 /* data covering who likely caused it and how long they ran */
8009 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
8010 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
8011 int perfcontrol_failsafe_pid;
8012 uint64_t perfcontrol_failsafe_tid;
8013 uint64_t perfcontrol_failsafe_thread_timer_at_start;
8014 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
8015 uint64_t perfcontrol_failsafe_recommended_at_trigger;
8016
8017 /*
8018 * Perf controller calls here to update the recommended core bitmask.
8019 * If the failsafe is active, we don't immediately apply the new value.
8020 * Instead, we store the new request and use it after the failsafe deactivates.
8021 *
8022 * If the failsafe is not active, immediately apply the update.
8023 *
8024 * No scheduler locks are held, no other locks are held that scheduler might depend on,
8025 * interrupts are enabled
8026 *
8027 * currently prototype is in osfmk/arm/machine_routines.h
8028 */
8029 void
8030 sched_perfcontrol_update_recommended_cores_reason(
8031 uint64_t recommended_cores,
8032 processor_reason_t reason,
8033 __unused uint32_t flags)
8034 {
8035 assert(preemption_enabled());
8036
8037 spl_t s = splsched();
8038 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8039
8040 if (reason == REASON_CLPC_SYSTEM) {
8041 pcs.pcs_requested_recommended_clpc_system = recommended_cores;
8042 } else {
8043 assert(reason == REASON_CLPC_USER);
8044 pcs.pcs_requested_recommended_clpc_user = recommended_cores;
8045 }
8046
8047 pcs.pcs_requested_recommended_clpc = pcs.pcs_requested_recommended_clpc_system &
8048 pcs.pcs_requested_recommended_clpc_user;
8049
8050 sysctl_sched_recommended_cores = pcs.pcs_requested_recommended_clpc;
8051
8052 sched_update_recommended_cores_locked(reason, 0);
8053
8054 simple_unlock(&sched_available_cores_lock);
8055 splx(s);
8056 }
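
/*
 * Worked example (illustrative masks): if CLPC has previously recommended
 *
 *	pcs_requested_recommended_clpc_system = 0xff   // all 8 CPUs
 *	pcs_requested_recommended_clpc_user   = 0x0f   // user policy: 4 CPUs
 *
 * then pcs_requested_recommended_clpc = 0xff & 0x0f = 0x0f, and a later
 * REASON_CLPC_SYSTEM update to 0x3f still yields 0x3f & 0x0f = 0x0f. The
 * sleep and failsafe overrides in sched_update_recommended_cores_locked()
 * can widen the mask that is actually published, but never the requested one.
 */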
8057
8058 void
8059 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
8060 {
8061 sched_perfcontrol_update_recommended_cores_reason(recommended_cores, REASON_CLPC_USER, 0);
8062 }
8063
8064 /*
8065 * Consider whether we need to activate the recommended cores failsafe
8066 *
8067 * Called from quantum timer interrupt context of a realtime thread
8068 * No scheduler locks are held, interrupts are disabled
8069 */
8070 void
8071 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
8072 {
8073 /*
8074 * Check if a realtime thread is starving the system
8075 * and bringing up non-recommended cores would help
8076 *
8077 * TODO: Is this the correct check for recommended == possible cores?
8078 * TODO: Validate the checks without the relevant lock are OK.
8079 */
8080
8081 if (__improbable(pcs.pcs_recommended_clpc_failsafe_active)) {
8082 /* keep track of how long the responsible thread runs */
8083 uint64_t cur_th_time = recount_current_thread_time_mach();
8084
8085 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8086
8087 if (pcs.pcs_recommended_clpc_failsafe_active &&
8088 cur_thread->thread_id == perfcontrol_failsafe_tid) {
8089 perfcontrol_failsafe_thread_timer_last_seen = cur_th_time;
8090 }
8091
8092 simple_unlock(&sched_available_cores_lock);
8093
8094 /* we're already trying to solve the problem, so bail */
8095 return;
8096 }
8097
8098 /* The failsafe won't help if there are no more processors to enable */
8099 if (__probable(bit_count(pcs.pcs_requested_recommended_clpc) >= processor_count)) {
8100 return;
8101 }
8102
8103 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
8104
8105 /* Use the maintenance thread as our canary in the coal mine */
8106 thread_t m_thread = sched_maintenance_thread;
8107
8108 /* If it doesn't look bad, nothing to see here */
8109 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
8110 return;
8111 }
8112
8113 /* It looks bad, take the lock to be sure */
8114 thread_lock(m_thread);
8115
8116 if (thread_get_runq(m_thread) == PROCESSOR_NULL ||
8117 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
8118 m_thread->last_made_runnable_time >= too_long_ago) {
8119 /*
8120 * Maintenance thread is either on cpu or blocked, and
8121 * therefore wouldn't benefit from more cores
8122 */
8123 thread_unlock(m_thread);
8124 return;
8125 }
8126
8127 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
8128
8129 thread_unlock(m_thread);
8130
8131 /*
8132 * There are cores disabled at perfcontrol's recommendation, but the
8133 * system is so overloaded that the maintenance thread can't run.
8134 * That likely means that perfcontrol can't run either, so it can't fix
8135 * the recommendation. We have to kick in a failsafe to keep from starving.
8136 *
8137 * When the maintenance thread has been starved for too long,
8138 * ignore the recommendation from perfcontrol and light up all the cores.
8139 *
8140 * TODO: Consider weird states like boot, sleep, or debugger
8141 */
8142
8143 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8144
8145 if (pcs.pcs_recommended_clpc_failsafe_active) {
8146 simple_unlock(&sched_available_cores_lock);
8147 return;
8148 }
8149
8150 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8151 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
8152 pcs.pcs_requested_recommended_clpc, maintenance_runnable_time, 0, 0, 0);
8153
8154 pcs.pcs_recommended_clpc_failsafe_active = true;
8155 perfcontrol_failsafe_activation_time = mach_absolute_time();
8156 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
8157 perfcontrol_failsafe_recommended_at_trigger = pcs.pcs_requested_recommended_clpc;
8158
8159 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
8160 task_t task = get_threadtask(cur_thread);
8161 perfcontrol_failsafe_pid = task_pid(task);
8162 strlcpy(perfcontrol_failsafe_name, proc_name_address(get_bsdtask_info(task)), sizeof(perfcontrol_failsafe_name));
8163
8164 perfcontrol_failsafe_tid = cur_thread->thread_id;
8165
8166 /* Blame the thread for time it has run recently */
8167 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
8168
8169 uint64_t last_seen = recount_current_thread_time_mach();
8170
8171 /* Compute the start time of the bad behavior in terms of the thread's on core time */
8172 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
8173 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
8174
8175 /* Publish the pcs_recommended_clpc_failsafe_active override to the CPUs */
8176 sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8177
8178 simple_unlock(&sched_available_cores_lock);
8179 }
8180
8181 /*
8182 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
8183 *
8184 * Runs in the context of the maintenance thread, no locks held
8185 */
8186 static void
8187 sched_recommended_cores_maintenance(void)
8188 {
8189 /* Common case - no failsafe, nothing to be done here */
8190 if (__probable(!pcs.pcs_recommended_clpc_failsafe_active)) {
8191 return;
8192 }
8193
8194 uint64_t ctime = mach_absolute_time();
8195
8196 boolean_t print_diagnostic = FALSE;
8197 char p_name[FAILSAFE_NAME_LEN] = "";
8198
8199 spl_t s = splsched();
8200 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8201
8202 /* Check again, under the lock, to avoid races */
8203 if (!pcs.pcs_recommended_clpc_failsafe_active) {
8204 goto out;
8205 }
8206
8207 /*
8208 * Ensure that the other cores get another few ticks to run some threads
8209 * If we don't have this hysteresis, the maintenance thread is the first
8210 * to run, and then it immediately kills the other cores
8211 */
8212 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
8213 goto out;
8214 }
8215
8216 /* Capture some diagnostic state under the lock so we can print it out later */
8217
8218 int pid = perfcontrol_failsafe_pid;
8219 uint64_t tid = perfcontrol_failsafe_tid;
8220
8221 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
8222 perfcontrol_failsafe_thread_timer_at_start;
8223 uint64_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
8224 uint64_t rec_cores_after = pcs.pcs_requested_recommended_clpc;
8225 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
8226 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
8227
8228 print_diagnostic = TRUE;
8229
8230 /* Deactivate the failsafe and reinstate the requested recommendation settings */
8231
8232 perfcontrol_failsafe_deactivation_time = ctime;
8233 pcs.pcs_recommended_clpc_failsafe_active = false;
8234
8235 sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8236
8237 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8238 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
8239 pcs.pcs_requested_recommended_clpc, failsafe_duration, 0, 0, 0);
8240
8241 out:
8242 simple_unlock(&sched_available_cores_lock);
8243 splx(s);
8244
8245 if (print_diagnostic) {
8246 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
8247
8248 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
8249 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
8250
8251 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
8252 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
8253
8254 printf("recommended core failsafe kicked in for %lld ms "
8255 "likely due to %s[%d] thread 0x%llx spending "
8256 "%lld ms on cpu at realtime priority - "
8257 "new recommendation: 0x%llx -> 0x%llx\n",
8258 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
8259 rec_cores_before, rec_cores_after);
8260 }
8261 }
8262
8263 #endif /* __arm__ || __arm64__ */
8264
8265 /*
8266 * This is true before we have jumped to kernel_bootstrap_thread
8267 * first thread context during boot, or while all processors
8268 * have offlined during system sleep and the scheduler is disabled.
8269 *
8270 * (Note: only ever true on ARM, Intel doesn't actually offline the last CPU)
8271 */
8272 bool
8273 sched_all_cpus_offline(void)
8274 {
8275 return pcs.pcs_effective.pcs_online_cores == 0;
8276 }
8277
8278 void
8279 sched_assert_not_last_online_cpu(__assert_only int cpu_id)
8280 {
8281 assertf(pcs.pcs_effective.pcs_online_cores != BIT(cpu_id),
8282 "attempting to shut down the last online CPU!");
8283 }
8284
8285 /*
8286 * This is the single, unified function for changing the published active core counts based on processor mode.
8287 * The three modes interact: whether a change in one moves the counts depends on the state of the others.
8288 *
8289 * Future: Add support for not decrementing counts in 'temporary derecommended online' mode
8290 * Future: Shutdown for system sleep should be 'temporary' according to the user counts
8291 * so that no client sees a transiently low number of CPUs.
8292 */
8293 void
8294 sched_processor_change_mode_locked(processor_t processor, processor_mode_t pcm_mode, bool set)
8295 {
8296 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8297 pset_assert_locked(processor->processor_set);
8298
8299 switch (pcm_mode) {
8300 case PCM_RECOMMENDED:
8301 if (set) {
8302 assert(!processor->is_recommended);
8303 assert(!bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
8304
8305 processor->is_recommended = true;
8306 bit_set(pcs.pcs_recommended_cores, processor->cpu_id);
8307
8308 if (processor->processor_online) {
8309 os_atomic_inc(&processor_avail_count_user, relaxed);
8310 #if CONFIG_SCHED_SMT
8311 if (processor->processor_primary == processor) {
8312 os_atomic_inc(&primary_processor_avail_count_user, relaxed);
8313 }
8314 #endif /* CONFIG_SCHED_SMT */
8315 }
8316 } else {
8317 assert(processor->is_recommended);
8318 assert(bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
8319
8320 processor->is_recommended = false;
8321 bit_clear(pcs.pcs_recommended_cores, processor->cpu_id);
8322
8323 if (processor->processor_online) {
8324 os_atomic_dec(&processor_avail_count_user, relaxed);
8325 #if CONFIG_SCHED_SMT
8326 if (processor->processor_primary == processor) {
8327 os_atomic_dec(&primary_processor_avail_count_user, relaxed);
8328 }
8329 #endif /* CONFIG_SCHED_SMT */
8330 }
8331 }
8332 break;
8333 case PCM_TEMPORARY:
8334 if (set) {
8335 assert(!processor->shutdown_temporary);
8336 assert(!bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
8337
8338 processor->shutdown_temporary = true;
8339 bit_set(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
8340
8341 if (!processor->processor_online) {
8342 goto counts_up;
8343 }
8344 } else {
8345 assert(processor->shutdown_temporary);
8346 assert(bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
8347
8348 processor->shutdown_temporary = false;
8349 bit_clear(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
8350
8351 if (!processor->processor_online) {
8352 goto counts_down;
8353 }
8354 }
8355 break;
8356 case PCM_ONLINE:
8357 if (set) {
8358 assert(!processor->processor_online);
8359 assert(!bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8360 processor->processor_online = true;
8361 bit_set(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
8362
8363 if (!processor->shutdown_temporary) {
8364 goto counts_up;
8365 }
8366 } else {
8367 assert(processor->processor_online);
8368 assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8369 processor->processor_online = false;
8370 bit_clear(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
8371
8372 if (!processor->shutdown_temporary) {
8373 goto counts_down;
8374 }
8375 }
8376 break;
8377 default:
8378 panic("unknown mode %d", pcm_mode);
8379 }
8380
8381 return;
8382
8383 counts_up:
8384 ml_cpu_up_update_counts(processor->cpu_id);
8385
8386 os_atomic_inc(&processor_avail_count, relaxed);
8387
8388 if (processor->is_recommended) {
8389 os_atomic_inc(&processor_avail_count_user, relaxed);
8390 #if CONFIG_SCHED_SMT
8391 if (processor->processor_primary == processor) {
8392 os_atomic_inc(&primary_processor_avail_count_user, relaxed);
8393 }
8394 #endif /* CONFIG_SCHED_SMT */
8395 }
8396 commpage_update_active_cpus();
8397
8398 return;
8399
8400 counts_down:
8401 ml_cpu_down_update_counts(processor->cpu_id);
8402
8403 os_atomic_dec(&processor_avail_count, relaxed);
8404
8405 if (processor->is_recommended) {
8406 os_atomic_dec(&processor_avail_count_user, relaxed);
8407 #if CONFIG_SCHED_SMT
8408 if (processor->processor_primary == processor) {
8409 os_atomic_dec(&primary_processor_avail_count_user, relaxed);
8410 }
8411 #endif /* CONFIG_SCHED_SMT */
8412 }
8413 commpage_update_active_cpus();
8414
8415 return;
8416 }
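
/*
 * Count bookkeeping summary for the function above (derived from the code and
 * shown here only as an aid; the middle column is the condition under which
 * the published counts move):
 *
 *	PCM_RECOMMENDED  set/clear   processor_online      -> _user counts only
 *	PCM_TEMPORARY    set/clear   !processor_online     -> full counts_up/down
 *	PCM_ONLINE       set/clear   !shutdown_temporary   -> full counts_up/down
 *
 * In other words, a core marked shutdown_temporary keeps contributing to
 * processor_avail_count while it is powered down, which is what lets CLPC
 * "temporarily" remove cores without a user-visible change in CPU counts.
 */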
8417
8418 bool
8419 sched_mark_processor_online(processor_t processor, __assert_only processor_reason_t reason)
8420 {
8421 assert(processor == current_processor());
8422
8423 processor_set_t pset = processor->processor_set;
8424
8425 spl_t s = splsched();
8426 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8427 pset_lock(pset);
8428
8429 /* Boot CPU coming online for the first time, either at boot or after sleep */
8430 bool is_first_online_processor = sched_all_cpus_offline();
8431 if (is_first_online_processor) {
8432 assert(processor == master_processor);
8433 }
8434
8435 assert((processor != master_processor) || (reason == REASON_SYSTEM) || support_bootcpu_shutdown);
8436
8437 sched_processor_change_mode_locked(processor, PCM_ONLINE, true);
8438
8439 assert(processor->processor_offline_state == PROCESSOR_OFFLINE_STARTING ||
8440 processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_RUNNING ||
8441 processor->processor_offline_state == PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
8442
8443 processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTED_NOT_WAITED);
8444
8445 ++pset->online_processor_count;
8446 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
8447
8448 if (processor->is_recommended) {
8449 SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
8450 }
8451 pset_unlock(pset);
8452
8453 smr_cpu_up(processor, SMR_CPU_REASON_OFFLINE);
8454
8455 simple_unlock(&sched_available_cores_lock);
8456 splx(s);
8457
8458 return is_first_online_processor;
8459 }
8460
8461 void
8462 sched_mark_processor_offline(processor_t processor, bool is_final_system_sleep)
8463 {
8464 assert(processor == current_processor());
8465
8466 processor_set_t pset = processor->processor_set;
8467
8468 spl_t s = splsched();
8469 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8470
8471 assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8472 assert(processor->processor_offline_state == PROCESSOR_OFFLINE_BEGIN_SHUTDOWN);
8473
8474 if (!is_final_system_sleep) {
8475 /*
8476 * We can't shut down the last available core!
8477 * Force recommend another CPU if this is the last one.
8478 */
8479
8480 if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
8481 sched_update_recommended_cores_locked(REASON_SYSTEM, BIT(processor->cpu_id));
8482 }
8483
8484 /* If we're still the last one, something went wrong. */
8485 if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
8486 panic("shutting down the last available core! online: 0x%llx rec: 0x%llxx",
8487 pcs.pcs_effective.pcs_online_cores,
8488 pcs.pcs_recommended_cores);
8489 }
8490 }
8491
8492 pset_lock(pset);
8493 assert(processor->state == PROCESSOR_RUNNING);
8494 assert(processor->processor_inshutdown);
8495 pset_update_processor_state(pset, processor, PROCESSOR_PENDING_OFFLINE);
8496 --pset->online_processor_count;
8497
8498 sched_processor_change_mode_locked(processor, PCM_ONLINE, false);
8499
8500 if (is_final_system_sleep) {
8501 assert3u(pcs.pcs_effective.pcs_online_cores, ==, 0);
8502 assert(processor == master_processor);
8503 assert(sched_all_cpus_offline());
8504
8505 processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
8506 } else {
8507 processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_PENDING_OFFLINE);
8508 }
8509
8510 simple_unlock(&sched_available_cores_lock);
8511
8512 SCHED(processor_queue_shutdown)(processor);
8513 /* pset lock dropped */
8514 SCHED(rt_queue_shutdown)(processor);
8515
8516 splx(s);
8517 }
8518
8519 /*
8520 * Apply a new recommended cores mask to the processors it affects
8521 * Runs after considering failsafes and such
8522 *
8523 * Iterate over processors and update their ->is_recommended field.
8524 * If a processor is running, we let it drain out at its next
8525 * quantum expiration or blocking point. If a processor is idle, there
8526 * may be more work for it to do, so IPI it.
8527 *
8528 * interrupts disabled, sched_available_cores_lock is held
8529 *
8530 * If a core is about to go offline, its bit will be set in core_going_offline,
8531 * so we can make sure not to pick it as the last resort cpu.
8532 */
8533 static void
8534 sched_update_recommended_cores_locked(processor_reason_t reason,
8535 cpumap_t core_going_offline)
8536 {
8537 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8538
8539 cpumap_t recommended_cores = pcs.pcs_requested_recommended_clpc;
8540
8541 if (pcs.pcs_init_completed) {
8542 recommended_cores &= pcs.pcs_effective.pcs_powerdown_recommended_cores;
8543 }
8544
8545 if (pcs.pcs_sleep_override_recommended || pcs.pcs_recommended_clpc_failsafe_active) {
8546 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8547 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
8548 recommended_cores,
8549 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
8550
8551 recommended_cores = pcs.pcs_managed_cores;
8552 }
8553
8554 if (bit_count(recommended_cores & pcs.pcs_effective.pcs_online_cores & ~core_going_offline) == 0) {
8555 /*
8556 * If there are no online cpus recommended,
8557 * then the system will make no forward progress.
8558 * Pick a CPU of last resort to avoid hanging.
8559 */
8560 int last_resort;
8561
8562 if (!support_bootcpu_shutdown) {
8563 /* We know the master_processor is always available */
8564 last_resort = master_processor->cpu_id;
8565 } else {
8566 /* Pick some still-online processor to be the processor of last resort */
8567 last_resort = lsb_first(pcs.pcs_effective.pcs_online_cores & ~core_going_offline);
8568
8569 if (last_resort == -1) {
8570 panic("%s> no last resort cpu found: 0x%llx 0x%llx",
8571 __func__, pcs.pcs_effective.pcs_online_cores, core_going_offline);
8572 }
8573 }
8574
8575 bit_set(recommended_cores, last_resort);
8576 }
8577
8578 if (pcs.pcs_recommended_cores == recommended_cores) {
8579 /* Nothing to do */
8580 return;
8581 }
8582
8583 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) |
8584 DBG_FUNC_START,
8585 recommended_cores,
8586 pcs.pcs_recommended_clpc_failsafe_active, pcs.pcs_sleep_override_recommended, 0);
8587
8588 cpumap_t needs_exit_idle_mask = 0x0;
8589
8590 /* First set recommended cores */
8591 foreach_node(node) {
8592 foreach_pset_id(pset_id, node) {
8593 processor_set_t pset = pset_array[pset_id];
8594
8595 cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
8596 cpumap_t newly_recommended = changed_recommendations & recommended_cores;
8597
8598 if (newly_recommended == 0) {
8599 /* Nothing to do */
8600 continue;
8601 }
8602
8603 pset_lock(pset);
8604
8605 cpumap_foreach(cpu_id, newly_recommended) {
8606 processor_t processor = processor_array[cpu_id];
8607
8608 sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, true);
8609
8610 processor->last_recommend_reason = reason;
8611
8612 if (pset->recommended_bitmask == 0) {
8613 /* Cluster is becoming available for scheduling */
8614 atomic_bit_set(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
8615 }
8616 bit_set(pset->recommended_bitmask, processor->cpu_id);
8617
8618 if (processor->state == PROCESSOR_IDLE) {
8619 if (processor != current_processor()) {
8620 bit_set(needs_exit_idle_mask, processor->cpu_id);
8621 }
8622 }
8623
8624 if (processor->processor_online) {
8625 SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
8626 }
8627 }
8628 pset_update_rt_stealable_state(pset);
8629
8630 pset_unlock(pset);
8631
8632 cpumap_foreach(cpu_id, newly_recommended) {
8633 smr_cpu_up(processor_array[cpu_id],
8634 SMR_CPU_REASON_IGNORED);
8635 }
8636 }
8637 }
8638
8639 /* Now shutdown not recommended cores */
8640 foreach_node(node) {
8641 foreach_pset_id(pset_id, node) {
8642 processor_set_t pset = pset_array[pset_id];
8643
8644 cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
8645 cpumap_t newly_unrecommended = changed_recommendations & ~recommended_cores;
8646
8647 if (newly_unrecommended == 0) {
8648 /* Nothing to do */
8649 continue;
8650 }
8651
8652 cpumap_foreach(cpu_id, newly_unrecommended) {
8653 processor_t processor = processor_array[cpu_id];
8654 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
8655
8656 pset_lock(pset);
8657
8658 sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, false);
8659
8660 if (reason != REASON_NONE) {
8661 processor->last_derecommend_reason = reason;
8662 }
8663 bit_clear(pset->recommended_bitmask, processor->cpu_id);
8664 pset_update_rt_stealable_state(pset);
8665 if (pset->recommended_bitmask == 0) {
8666 /* Cluster is becoming unavailable for scheduling */
8667 atomic_bit_clear(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
8668 }
8669
8670 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
8671 ipi_type = SCHED_IPI_IMMEDIATE;
8672 }
8673 SCHED(processor_queue_shutdown)(processor);
8674 /* pset unlocked */
8675
8676 SCHED(rt_queue_shutdown)(processor);
8677
8678 if (ipi_type == SCHED_IPI_NONE) {
8679 /*
8680 * If the core is idle,
8681 * we can directly mark the processor
8682 * as "Ignored"
8683 *
8684 * Otherwise, smr will detect this
8685 * during smr_cpu_leave() when the
8686 * processor actually idles.
8687 */
8688 smr_cpu_down(processor, SMR_CPU_REASON_IGNORED);
8689 } else if (processor == current_processor()) {
8690 ast_on(AST_PREEMPT);
8691 } else {
8692 sched_ipi_perform(processor, ipi_type);
8693 }
8694 }
8695 }
8696 }
8697
8698 if (pcs.pcs_init_completed) {
8699 assert3u(pcs.pcs_recommended_cores, ==, recommended_cores);
8700 }
8701
8702 #if defined(__x86_64__)
8703 commpage_update_active_cpus();
8704 #endif
8705 /* Issue all pending IPIs now that the pset lock has been dropped */
8706 cpumap_foreach(cpu_id, needs_exit_idle_mask) {
8707 processor_t processor = processor_array[cpu_id];
8708 machine_signal_idle(processor);
8709 }
8710
8711 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
8712 needs_exit_idle_mask, 0, 0, 0);
8713 }
8714
8715 /*
8716 * Enters with the available cores lock held, returns with it held, but will drop it in the meantime.
8717 * Enters with the cluster_powerdown_lock held, returns with it held, keeps it held.
8718 */
8719 static void
8720 sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t caller_s)
8721 {
8722 lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8723 simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8724
8725 assert(ml_get_interrupts_enabled() == false);
8726 assert(caller_s == true); /* Caller must have had interrupts enabled when they took the lock */
8727
8728 /* All transitions should be quiesced before we start changing things */
8729 assert_no_processors_in_transition_locked();
8730
8731 pcs.pcs_in_flight_reason = requested_reason;
8732
8733 struct powered_cores_state requested = sched_compute_requested_powered_cores();
8734 struct powered_cores_state effective = pcs.pcs_effective;
8735
8736 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_START,
8737 requested.pcs_online_cores, requested_reason, 0, effective.pcs_online_cores);
8738
8739 /* The bits that are different and in the new value */
8740 cpumap_t newly_online_cores = (requested.pcs_online_cores ^
8741 effective.pcs_online_cores) & requested.pcs_online_cores;
8742
8743 /* The bits that are different and are not in the new value */
8744 cpumap_t newly_offline_cores = (requested.pcs_online_cores ^
8745 effective.pcs_online_cores) & ~requested.pcs_online_cores;
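/*
 * Illustrative example (hypothetical masks): requested 0b1110 and effective
 * 0b0111 give XOR 0b1001, so newly_online_cores = 0b1000 and
 * newly_offline_cores = 0b0001.
 */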
8746
8747 cpumap_t newly_recommended_cores = (requested.pcs_powerdown_recommended_cores ^
8748 effective.pcs_powerdown_recommended_cores) & requested.pcs_powerdown_recommended_cores;
8749
8750 cpumap_t newly_derecommended_cores = (requested.pcs_powerdown_recommended_cores ^
8751 effective.pcs_powerdown_recommended_cores) & ~requested.pcs_powerdown_recommended_cores;
8752
8753 cpumap_t newly_temporary_cores = (requested.pcs_tempdown_cores ^
8754 effective.pcs_tempdown_cores) & requested.pcs_tempdown_cores;
8755
8756 cpumap_t newly_nontemporary_cores = (requested.pcs_tempdown_cores ^
8757 effective.pcs_tempdown_cores) & ~requested.pcs_tempdown_cores;
8758
8759 /*
8760 * Newly online and derecommended cores should be derecommended
8761 * before powering them up, so they never run around doing stuff
8762 * before we reach the end of this function.
8763 */
8764
8765 cpumap_t newly_online_and_derecommended = newly_online_cores & newly_derecommended_cores;
8766
8767 /*
8768 * Publish the goal state we're working on achieving.
8769 * At the end of this function, pcs_effective will match this.
8770 */
8771 pcs.pcs_requested = requested;
8772
8773 pcs.pcs_effective.pcs_powerdown_recommended_cores |= newly_recommended_cores;
8774 pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_online_and_derecommended;
8775
8776 sched_update_recommended_cores_locked(requested_reason, 0);
8777
8778 simple_unlock(&sched_available_cores_lock);
8779 splx(caller_s);
8780
8781 assert(ml_get_interrupts_enabled() == true);
8782
8783 /* First set powered cores */
8784 cpumap_t started_cores = 0ull;
8785 foreach_node(node) {
8786 foreach_pset_id(pset_id, node) {
8787 processor_set_t pset = pset_array[pset_id];
8788
8789 spl_t s = splsched();
8790 pset_lock(pset);
8791 cpumap_t pset_newly_online = newly_online_cores & pset->cpu_bitmask;
8792
8793 __assert_only cpumap_t pset_online_cores =
8794 pset->cpu_state_map[PROCESSOR_START] |
8795 pset->cpu_state_map[PROCESSOR_IDLE] |
8796 pset->cpu_state_map[PROCESSOR_DISPATCHING] |
8797 pset->cpu_state_map[PROCESSOR_RUNNING];
8798 assert((pset_online_cores & pset_newly_online) == 0);
8799
8800 pset_unlock(pset);
8801 splx(s);
8802
8803 if (pset_newly_online == 0) {
8804 /* Nothing to do */
8805 continue;
8806 }
8807 cpumap_foreach(cpu_id, pset_newly_online) {
8808 processor_start_reason(processor_array[cpu_id], requested_reason);
8809 bit_set(started_cores, cpu_id);
8810 }
8811 }
8812 }
8813
8814 /*
8815 * Wait for processors to finish starting in parallel.
8816 * We never proceed until all newly started processors have finished.
8817 *
8818 * This has the side effect of closing the ml_cpu_up_processors race,
8819 * as all started CPUs must have SIGPdisabled cleared by the time this
8820 * is satisfied. (rdar://124631843)
8821 */
8822 cpumap_foreach(cpu_id, started_cores) {
8823 processor_wait_for_start(processor_array[cpu_id], PROCESSOR_POWERED_CORES_CHANGE);
8824 }
8825
8826 /*
8827 * Update published counts of processors to match new temporary status
8828 * Publish all temporary before nontemporary, so that any readers that
8829 * see a middle state will see a slightly too high count instead of
8830 * ending up seeing a 0 (because that crashes dispatch_apply, ask
8831 * me how I know)
8832 */
8833
8834 spl_t s;
8835 s = splsched();
8836 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8837
8838 foreach_node(node) {
8839 foreach_pset_id(pset_id, node) {
8840 processor_set_t pset = pset_array[pset_id];
8841
8842 pset_lock(pset);
8843
8844 cpumap_t pset_newly_temporary = newly_temporary_cores & pset->cpu_bitmask;
8845
8846 cpumap_foreach(cpu_id, pset_newly_temporary) {
8847 sched_processor_change_mode_locked(processor_array[cpu_id],
8848 PCM_TEMPORARY, true);
8849 }
8850
8851 pset_unlock(pset);
8852 }
8853 }
8854
8855 foreach_node(node) {
8856 foreach_pset_id(pset_id, node) {
8857 processor_set_t pset = pset_array[pset_id];
8858
8859 pset_lock(pset);
8860
8861 cpumap_t pset_newly_nontemporary = newly_nontemporary_cores & pset->cpu_bitmask;
8862
8863 cpumap_foreach(cpu_id, pset_newly_nontemporary) {
8864 sched_processor_change_mode_locked(processor_array[cpu_id],
8865 PCM_TEMPORARY, false);
8866 }
8867
8868 pset_unlock(pset);
8869 }
8870 }
8871
8872 simple_unlock(&sched_available_cores_lock);
8873 splx(s);
8874
8875 /* Now shutdown not powered cores */
8876 foreach_node(node) {
8877 foreach_pset_id(pset_id, node) {
8878 processor_set_t pset = pset_array[pset_id];
8879
8880 s = splsched();
8881 pset_lock(pset);
8882
8883 cpumap_t pset_newly_offline = newly_offline_cores & pset->cpu_bitmask;
8884 __assert_only cpumap_t pset_powered_cores =
8885 pset->cpu_state_map[PROCESSOR_START] |
8886 pset->cpu_state_map[PROCESSOR_IDLE] |
8887 pset->cpu_state_map[PROCESSOR_DISPATCHING] |
8888 pset->cpu_state_map[PROCESSOR_RUNNING];
8889 assert((pset_powered_cores & pset_newly_offline) == pset_newly_offline);
8890
8891 pset_unlock(pset);
8892 splx(s);
8893
8894 if (pset_newly_offline == 0) {
8895 /* Nothing to do */
8896 continue;
8897 }
8898
8899 cpumap_foreach(cpu_id, pset_newly_offline) {
8900 processor_exit_reason(processor_array[cpu_id], requested_reason, false);
8901 }
8902 }
8903 }
8904
8905 assert(ml_get_interrupts_enabled() == true);
8906
8907 s = splsched();
8908 simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8909
8910 assert(s == caller_s);
8911
8912 pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_derecommended_cores;
8913
8914 sched_update_recommended_cores_locked(requested_reason, 0);
8915
8916 pcs.pcs_previous_reason = requested_reason;
8917
8918 /* All transitions should be quiesced now that we are done changing things */
8919 assert_no_processors_in_transition_locked();
8920
8921 assert3u(pcs.pcs_requested.pcs_online_cores, ==, pcs.pcs_effective.pcs_online_cores);
8922 assert3u(pcs.pcs_requested.pcs_tempdown_cores, ==, pcs.pcs_effective.pcs_tempdown_cores);
8923 assert3u(pcs.pcs_requested.pcs_powerdown_recommended_cores, ==, pcs.pcs_effective.pcs_powerdown_recommended_cores);
8924
8925 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_END, 0, 0, 0, 0);
8926 }
8927
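/*
 * OR the given option bits into the current thread's options, under the
 * thread lock with interrupts disabled.
 */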
8928 void
8929 thread_set_options(uint32_t thopt)
8930 {
8931 spl_t x;
8932 thread_t t = current_thread();
8933
8934 x = splsched();
8935 thread_lock(t);
8936
8937 t->options |= thopt;
8938
8939 thread_unlock(t);
8940 splx(x);
8941 }
8942
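/*
 * Stash a block hint on the thread; it is consumed the next time the
 * thread blocks.
 */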
8943 void
8944 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
8945 {
8946 thread->pending_block_hint = block_hint;
8947 }
8948
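/*
 * Return the recommended maximum parallelism for the given QoS class,
 * as computed by the active scheduler policy.
 */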
8949 uint32_t
8950 qos_max_parallelism(int qos, uint64_t options)
8951 {
8952 return SCHED(qos_max_parallelism)(qos, options);
8953 }
8954
8955 uint32_t
8956 sched_qos_max_parallelism(__unused int qos, uint64_t options)
8957 {
8958 host_basic_info_data_t hinfo;
8959 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
8960
8961
8962 /*
8963 * QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE should only be used on AMP platforms, which
8964 * implement their own qos_max_parallelism() interfaces.
8965 */
8966 assert((options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) == 0);
8967
8968 /* Query the machine layer for core information */
8969 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
8970 (host_info_t)&hinfo, &count);
8971 assert(kret == KERN_SUCCESS);
8972
8973 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
8974 return hinfo.logical_cpu;
8975 } else {
8976 return hinfo.physical_cpu;
8977 }
8978 }
8979
8980 int sched_allow_NO_SMT_threads = 1;
8981 #if CONFIG_SCHED_SMT
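/*
 * A thread is NO_SMT if the feature is enabled, the thread is not bound to
 * a processor, and either the thread or its task has requested NO_SMT.
 */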
8982 bool
8983 thread_no_smt(thread_t thread)
8984 {
8985 return sched_allow_NO_SMT_threads &&
8986 (thread->bound_processor == PROCESSOR_NULL) &&
8987 ((thread->sched_flags & TH_SFLAG_NO_SMT) || (get_threadtask(thread)->t_flags & TF_NO_SMT));
8988 }
8989
8990 bool
8991 processor_active_thread_no_smt(processor_t processor)
8992 {
8993 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
8994 }
8995 #endif /* CONFIG_SCHED_SMT */
8996
8997 #if __arm64__
8998
8999 /*
9000 * Set up or replace old timer with new timer
9001 *
9002 * Returns true if an old timer was cancelled, false if it was not
9003 */
9004 boolean_t
9005 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
9006 {
9007 /*
9008 * Exchange the old deadline for the new deadline. If the old deadline
9009 * was nonzero, the pending callback was cancelled; otherwise it was not.
9010 */
9011
9012 return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
9013 relaxed) != 0;
9014 }
9015
9016 /*
9017 * Set global SFI window (in usec)
9018 */
9019 kern_return_t
9020 sched_perfcontrol_sfi_set_window(uint64_t window_usecs)
9021 {
9022 kern_return_t ret = KERN_NOT_SUPPORTED;
9023 #if CONFIG_THREAD_GROUPS
9024 if (window_usecs == 0ULL) {
9025 ret = sfi_window_cancel();
9026 } else {
9027 ret = sfi_set_window(window_usecs);
9028 }
9029 #endif // CONFIG_THREAD_GROUPS
9030 return ret;
9031 }
9032
9033 /*
9034 * Set background / maintenance / mitigation SFI class offtimes
9035 */
9036 kern_return_t
9037 sched_perfcontrol_sfi_set_bg_offtime(uint64_t offtime_usecs)
9038 {
9039 kern_return_t ret = KERN_NOT_SUPPORTED;
9040 #if CONFIG_THREAD_GROUPS
9041 if (offtime_usecs == 0ULL) {
9042 ret = sfi_class_offtime_cancel(SFI_CLASS_MAINTENANCE);
9043 ret |= sfi_class_offtime_cancel(SFI_CLASS_DARWIN_BG);
9044 ret |= sfi_class_offtime_cancel(SFI_CLASS_RUNAWAY_MITIGATION);
9045 } else {
9046 ret = sfi_set_class_offtime(SFI_CLASS_MAINTENANCE, offtime_usecs);
9047 ret |= sfi_set_class_offtime(SFI_CLASS_DARWIN_BG, offtime_usecs);
9048 ret |= sfi_set_class_offtime(SFI_CLASS_RUNAWAY_MITIGATION, offtime_usecs);
9049 }
9050 #endif // CONFIG_THREAD_GROUPS
9051 return ret;
9052 }
9053
9054 /*
9055 * Set utility SFI class offtime
9056 */
9057 kern_return_t
9058 sched_perfcontrol_sfi_set_utility_offtime(uint64_t offtime_usecs)
9059 {
9060 kern_return_t ret = KERN_NOT_SUPPORTED;
9061 #if CONFIG_THREAD_GROUPS
9062 if (offtime_usecs == 0ULL) {
9063 ret = sfi_class_offtime_cancel(SFI_CLASS_UTILITY);
9064 } else {
9065 ret = sfi_set_class_offtime(SFI_CLASS_UTILITY, offtime_usecs);
9066 }
9067 #endif // CONFIG_THREAD_GROUPS
9068 return ret;
9069 }
9070
9071 #endif /* __arm64__ */
9072
9073 #if CONFIG_SCHED_EDGE
9074
9075 #define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
9076
9077 /*
9078 * sched_edge_pset_running_higher_bucket()
9079 *
9080 * Routine to calculate cumulative running counts for each scheduling
9081 * bucket. This lets the load calculation determine whether a cluster is
9082 * running any threads at a QoS lower than the thread being migrated.
9084 */
9085 static void
9086 sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
9087 {
9088 bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
9089 bzero(running_higher, sizeof(uint32_t) * TH_BUCKET_SCHED_MAX);
9090
9091 /* Count the running threads per bucket */
9092 for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
9093 sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
9094 /* Don't count idle threads */
9095 if (cpu_bucket < TH_BUCKET_SCHED_MAX) {
9096 running_higher[cpu_bucket]++;
9097 }
9098 }
9099
9100 /* Calculate the cumulative running counts as a prefix sum */
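/*
 * Illustrative example: per-bucket running counts {1, 2, 0, ...} become
 * {1, 3, 3, ...} after the prefix sum, i.e. the number of CPUs running
 * threads at or above each bucket.
 */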
9101 for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SCHED_MAX - 1; bucket++) {
9102 running_higher[bucket + 1] += running_higher[bucket];
9103 }
9104 }
9105
9106 /*
9107 * sched_update_pset_load_average()
9108 *
9109 * Updates the load average for each sched bucket for a cluster.
9110 * This routine must be called with the pset lock held.
9111 */
9112 void
9113 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
9114 {
9115 int avail_cpu_count = pset_available_cpu_count(pset);
9116 if (avail_cpu_count == 0) {
9117 /* Looks like the pset is not runnable any more; nothing to do here */
9118 return;
9119 }
9120
9121 /*
9122 * Edge Scheduler Optimization
9123 *
9124 * See if more callers of this routine can pass in timestamps to avoid the
9125 * mach_absolute_time() call here.
9126 */
9127
9128 if (!curtime) {
9129 curtime = mach_absolute_time();
9130 }
9131 uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
9132 int64_t delta_ticks = curtime - last_update;
9133 if (delta_ticks < 0) {
9134 return;
9135 }
9136
9137 uint64_t delta_nsecs = 0;
9138 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
9139
9140 if (__improbable(delta_nsecs > UINT32_MAX)) {
9141 delta_nsecs = UINT32_MAX;
9142 }
9143
9144 /* Update the shared resource load on the pset */
9145 for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
9146 uint64_t shared_rsrc_runnable_load = sched_edge_shared_rsrc_runnable_load(&pset->pset_clutch_root, shared_rsrc_type);
9147 uint64_t shared_rsrc_running_load = bit_count(pset->cpu_running_cluster_shared_rsrc_thread[shared_rsrc_type]);
9148 uint64_t new_shared_load = shared_rsrc_runnable_load + shared_rsrc_running_load;
9149 uint64_t old_shared_load = os_atomic_xchg(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], new_shared_load, relaxed);
9150 if (old_shared_load != new_shared_load) {
9151 KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_SHARED_LOAD) | DBG_FUNC_NONE, pset->pset_cluster_id, shared_rsrc_type, new_shared_load, shared_rsrc_running_load);
9152 }
9153 }
9154
9155 uint32_t running_higher[TH_BUCKET_SCHED_MAX];
9156 sched_edge_pset_running_higher_bucket(pset, running_higher);
9157
9158 for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
9159 uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
9160 uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
9161 uint32_t current_runq_depth = sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket];
9162 os_atomic_store(&pset->pset_runnable_depth[sched_bucket], current_runq_depth, relaxed);
9163
9164 uint32_t current_load = current_runq_depth / avail_cpu_count;
9165 /*
9166 * For the new load average multiply current_load by delta_nsecs (which results in a 32.0 value).
9167 * Since we want to maintain the load average as a 24.8 fixed arithmetic value for precision, the
9168 * new load average needs to be shifted before it can be added to the old load average.
9169 */
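/*
 * Worked example (illustrative numbers, assuming 8 fraction bits): with
 * current_load = 2 and delta_nsecs = 5,000,000, the new term is
 * (2 * 5,000,000) << 8; it is combined with old_load_average * 10,000,000
 * and divided by (5,000,000 + 10,000,000) to keep the result in 24.8
 * fixed point.
 */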
9170 uint64_t new_load_average_factor = (current_load * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
9171
9172 /*
9173 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
9174 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
9175 * when the system is already loaded; otherwise, for an idle system, use the latest load average immediately.
9176 */
9177 int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
9178 boolean_t load_uptick = (old_load_shifted == 0) && (current_load != 0);
9179 boolean_t load_downtick = (old_load_shifted != 0) && (current_load == 0);
9180 uint64_t load_average;
9181 if (load_uptick || load_downtick) {
9182 load_average = (current_load << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
9183 } else {
9184 /* Indicates a loaded system; use EWMA for load average calculation */
9185 load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
9186 }
9187 os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
9188 if (load_average != old_load_average) {
9189 KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
9190 }
9191 }
9192 os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
9193 }
9194
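/*
 * Update the EWMA of average per-thread execution time (in usec) for the
 * cluster, using a lock-free read-modify-write loop on the packed value.
 */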
9195 void
9196 sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
9197 {
9198 pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
9199 uint64_t avg_thread_execution_time = 0;
9200
9201 os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
9202 old_execution_time_packed.pset_execution_time_packed,
9203 new_execution_time_packed.pset_execution_time_packed, relaxed, {
9204 uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
9205 int64_t delta_ticks = curtime - last_update;
9206 if (delta_ticks <= 0) {
9207 /*
9208 * It's possible that another CPU came in and updated the pset_execution_time
9209 * before this CPU could do it. Since the average execution time is meant to
9210 * be an approximate measure per cluster, ignore the older update.
9211 */
9212 os_atomic_rmw_loop_give_up(return );
9213 }
9214 uint64_t delta_nsecs = 0;
9215 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
9216
9217 uint64_t nanotime = 0;
9218 absolutetime_to_nanoseconds(execution_time, &nanotime);
9219 uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
9220
9221 /*
9222 * Since the average execution time is stored in microseconds, avoid rounding errors in
9223 * the EWMA calculation by only using a non-zero previous value.
9224 */
9225 uint64_t old_avg_thread_execution_time = MAX(old_execution_time_packed.pset_avg_thread_execution_time, 1ULL);
9226
9227 uint64_t old_execution_time = (old_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
9228 uint64_t new_execution_time = (execution_time_us * delta_nsecs);
9229
9230 avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
9231 new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
9232 new_execution_time_packed.pset_execution_time_last_update = curtime;
9233 });
9234 if (new_execution_time_packed.pset_avg_thread_execution_time != old_execution_time_packed.pset_avg_thread_execution_time) {
9235 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
9236 }
9237 }
9238
9239 uint64_t
9240 sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
9241 {
9242 /* Prevent migrations to derecommended clusters */
9243 if (!pset_is_recommended(pset)) {
9244 return UINT64_MAX;
9245 }
9246 return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
9247 }
9248
9249 #else /* CONFIG_SCHED_EDGE */
9250
9251 void
9252 sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
9253 {
9254 int non_rt_load = pset->pset_runq.count;
9255 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
9256 int new_load_average = ((int)pset->load_average + load) >> 1;
9257
9258 pset->load_average = new_load_average;
9259 #if (DEVELOPMENT || DEBUG)
9260 #if __AMP__
9261 if (pset->pset_cluster_type == PSET_AMP_P) {
9262 KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
9263 }
9264 #endif
9265 #endif
9266 }
9267
9268 void
9269 sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
9270 {
9271 }
9272
9273 #endif /* CONFIG_SCHED_EDGE */
9274
9275 /* pset is locked */
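/*
 * A "fast track" candidate is an available CPU in this pset with no pending
 * urgent AST that is not already running a realtime thread (and is not cpu0
 * when sched_avoid_cpu0 is set on x86_64).
 */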
9276 bool
9277 processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
9278 {
9279 int cpuid = processor->cpu_id;
9280 #if defined(__x86_64__)
9281 if (sched_avoid_cpu0 && (cpuid == 0)) {
9282 return false;
9283 }
9284 #endif
9285
9286 cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
9287
9288 return bit_test(fasttrack_map, cpuid);
9289 }
9290
9291 #if CONFIG_SCHED_SMT
9292 /* pset is locked */
9293 static bool
9294 all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups)
9295 {
9296 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
9297 int nbackup_cpus = 0;
9298
9299 if (include_backups && rt_runq_is_low_latency(pset)) {
9300 nbackup_cpus = sched_rt_n_backup_processors;
9301 }
9302
9303 cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
9304 if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
9305 bit_clear(cpu_map, 0);
9306 }
9307 return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
9308 }
9309
9310 /* pset is locked */
9311 static bool
9312 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups)
9313 {
9314 int nbackup_cpus = 0;
9315
9316 if (include_backups && rt_runq_is_low_latency(pset)) {
9317 nbackup_cpus = sched_rt_n_backup_processors;
9318 }
9319
9320 cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
9321 return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
9322 }
9323 #endif /* CONFIG_SCHED_SMT */
9324
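/*
 * Decide whether this processor should pick up a realtime thread, applying
 * the cpu0-avoidance and SMT-secondary policies where configured.
 */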
9325 static bool
9326 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup)
9327 {
9328 if (!processor->is_recommended) {
9329 return false;
9330 }
9331 bool ok_to_run_realtime_thread = true;
9332 #if CONFIG_SCHED_SMT
9333 bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
9334 if (spill_pending) {
9335 return true;
9336 }
9337 if (processor->cpu_id == 0) {
9338 if (sched_avoid_cpu0 == 1) {
9339 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1, as_backup);
9340 } else if (sched_avoid_cpu0 == 2) {
9341 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, ~0x3, as_backup);
9342 }
9343 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
9344 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2, as_backup);
9345 } else if (processor->processor_primary != processor) {
9346 ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset, as_backup));
9347 }
9348 #else /* CONFIG_SCHED_SMT */
9349 (void)pset;
9350 (void)processor;
9351 (void)as_backup;
9352 #endif /* CONFIG_SCHED_SMT */
9353 return ok_to_run_realtime_thread;
9354 }
9355
9356 void
9357 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
9358 {
9359 if (drop_lock) {
9360 pset_unlock(pset);
9361 }
9362 }
9363
9364 #if defined(__x86_64__)
9365 void
9366 thread_set_no_smt(bool set)
9367 {
9368 (void) set;
9369 #if CONFIG_SCHED_SMT
9370 if (!system_is_SMT) {
9371 /* Not a machine that supports SMT */
9372 return;
9373 }
9374
9375 thread_t thread = current_thread();
9376
9377 spl_t s = splsched();
9378 thread_lock(thread);
9379 if (set) {
9380 thread->sched_flags |= TH_SFLAG_NO_SMT;
9381 }
9382 thread_unlock(thread);
9383 splx(s);
9384 #endif /* CONFIG_SCHED_SMT */
9385 }
9386 #endif /* __x86_64__ */
9387
9388
9389 #if CONFIG_SCHED_SMT
9390 bool
9391 thread_get_no_smt(void)
9392 {
9393 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
9394 }
9395
9396 extern void task_set_no_smt(task_t);
9397 void
9398 task_set_no_smt(task_t task)
9399 {
9400 if (!system_is_SMT) {
9401 /* Not a machine that supports SMT */
9402 return;
9403 }
9404
9405 if (task == TASK_NULL) {
9406 task = current_task();
9407 }
9408
9409 task_lock(task);
9410 task->t_flags |= TF_NO_SMT;
9411 task_unlock(task);
9412 }
9413
9414 #if DEBUG || DEVELOPMENT
9415 extern void sysctl_task_set_no_smt(char no_smt);
9416 void
9417 sysctl_task_set_no_smt(char no_smt)
9418 {
9419 if (!system_is_SMT) {
9420 /* Not a machine that supports SMT */
9421 return;
9422 }
9423
9424 task_t task = current_task();
9425
9426 task_lock(task);
9427 if (no_smt == '1') {
9428 task->t_flags |= TF_NO_SMT;
9429 }
9430 task_unlock(task);
9431 }
9432
9433 extern char sysctl_task_get_no_smt(void);
9434 char
9435 sysctl_task_get_no_smt(void)
9436 {
9437 task_t task = current_task();
9438
9439 if (task->t_flags & TF_NO_SMT) {
9440 return '1';
9441 }
9442 return '0';
9443 }
9444 #endif /* DEVELOPMENT || DEBUG */
9445 #else /* CONFIG_SCHED_SMT */
9446
9447 extern void task_set_no_smt(task_t);
9448 void
9449 task_set_no_smt(__unused task_t task)
9450 {
9451 return;
9452 }
9453
9454 #if DEBUG || DEVELOPMENT
9455 extern void sysctl_task_set_no_smt(char no_smt);
9456 void
9457 sysctl_task_set_no_smt(__unused char no_smt)
9458 {
9459 return;
9460 }
9461
9462 extern char sysctl_task_get_no_smt(void);
9463 char
9464 sysctl_task_get_no_smt(void)
9465 {
9466 return '1';
9467 }
9468 #endif /* DEBUG || DEVELOPMENT */
9469 #endif /* CONFIG_SCHED_SMT */
9470
9471 #if __AMP__
9472 static kern_return_t
9473 pset_cluster_type_from_name_char(char cluster_type_name, pset_cluster_type_t *pset_cluster_type)
9474 {
9475 switch (cluster_type_name) {
9476 case 'E':
9477 case 'e':
9478 *pset_cluster_type = PSET_AMP_E;
9479 return KERN_SUCCESS;
9480 case 'P':
9481 case 'p':
9482 *pset_cluster_type = PSET_AMP_P;
9483 return KERN_SUCCESS;
9484 default:
9485 return KERN_INVALID_ARGUMENT;
9486 }
9487 }
9488 #endif /* __AMP__ */
9489
9490 __private_extern__ kern_return_t
9491 thread_soft_bind_cluster_type(thread_t thread, char cluster_type)
9492 {
9493 #if __AMP__
9494 kern_return_t kr;
9495 spl_t s = splsched();
9496 thread_lock(thread);
9497 thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
9498 pset_cluster_type_t pset_cluster_type;
9499 kr = pset_cluster_type_from_name_char(cluster_type, &pset_cluster_type);
9500 if (kr == KERN_SUCCESS) {
9501 pset_node_t bind_node = pset_node_for_pset_cluster_type(pset_cluster_type);
9502 if (bind_node != PSET_NODE_NULL) {
9503 thread->th_bound_cluster_id = bind_node->psets->pset_id;
9504 } else {
9505 /*
9506 * The specified cluster type isn't present on the system,
9507 * either because we're too early in boot or because the
9508 * underlying platform lacks that cluster type. This error
9509 * code assumes the latter.
9510 */
9511 kr = KERN_INVALID_ARGUMENT;
9512 }
9513 }
9514 thread_unlock(thread);
9515 splx(s);
9516
9517 if ((kr == KERN_SUCCESS) && (thread == current_thread())) {
9518 /* Trigger a context-switch to get on the newly bound cluster */
9519 thread_block(THREAD_CONTINUE_NULL);
9520 }
9521 return kr;
9522 #else /* __AMP__ */
9523 (void)thread;
9524 (void)cluster_type;
9525 return KERN_SUCCESS;
9526 #endif /* __AMP__ */
9527 }
9528
9529 extern uint32_t thread_bound_cluster_id(thread_t thread);
9530 uint32_t
9531 thread_bound_cluster_id(thread_t thread)
9532 {
9533 return thread->th_bound_cluster_id;
9534 }
9535
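/*
 * Soft-bind a thread to a specific cluster id, or unbind it via THREAD_UNBIND
 * or THREAD_BOUND_CLUSTER_NONE. With THREAD_BIND_ELIGIBLE_ONLY, the bind is
 * rejected if the thread is not eligible for the target pset.
 */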
9536 __private_extern__ kern_return_t
9537 thread_soft_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options)
9538 {
9539 #if __AMP__
9540 if (cluster_id == THREAD_BOUND_CLUSTER_NONE) {
9541 /* Treat binding to THREAD_BOUND_CLUSTER_NONE as a request to unbind. */
9542 options |= THREAD_UNBIND;
9543 }
9544
9545 if (options & THREAD_UNBIND) {
9546 cluster_id = THREAD_BOUND_CLUSTER_NONE;
9547 } else {
9548 /* Validate the specified cluster id */
9549 int max_clusters = ml_get_cluster_count();
9550 if (cluster_id >= max_clusters) {
9551 /* Invalid cluster id */
9552 return KERN_INVALID_VALUE;
9553 }
9554 processor_set_t pset = pset_array[cluster_id];
9555 if (pset == NULL) {
9556 /* Cluster has not finished initializing at boot */
9557 return KERN_FAILURE;
9558 }
9559 if (options & THREAD_BIND_ELIGIBLE_ONLY) {
9560 if (SCHED(thread_eligible_for_pset)(thread, pset) == false) {
9561 /* Thread is not recommended for the cluster type */
9562 return KERN_INVALID_POLICY;
9563 }
9564 }
9565 }
9566
9567 spl_t s = splsched();
9568 thread_lock(thread);
9569
9570 thread->th_bound_cluster_id = cluster_id;
9571
9572 thread_unlock(thread);
9573 splx(s);
9574
9575 if (thread == current_thread()) {
9576 /* Trigger a context-switch to get on the newly bound cluster */
9577 thread_block(THREAD_CONTINUE_NULL);
9578 }
9579 #else /* __AMP__ */
9580 (void)thread;
9581 (void)cluster_id;
9582 (void)options;
9583 #endif /* __AMP__ */
9584 return KERN_SUCCESS;
9585 }
9586
9587 #if DEVELOPMENT || DEBUG
9588 extern int32_t sysctl_get_bound_cpuid(void);
9589 int32_t
9590 sysctl_get_bound_cpuid(void)
9591 {
9592 int32_t cpuid = -1;
9593 thread_t self = current_thread();
9594
9595 processor_t processor = self->bound_processor;
9596 if (processor == NULL) {
9597 cpuid = -1;
9598 } else {
9599 cpuid = processor->cpu_id;
9600 }
9601
9602 return cpuid;
9603 }
9604
9605 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
9606 kern_return_t
9607 sysctl_thread_bind_cpuid(int32_t cpuid)
9608 {
9609 processor_t processor = PROCESSOR_NULL;
9610
9611 if (cpuid == -1) {
9612 goto unbind;
9613 }
9614
9615 if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
9616 return KERN_INVALID_VALUE;
9617 }
9618
9619 processor = processor_array[cpuid];
9620 if (processor == PROCESSOR_NULL) {
9621 return KERN_INVALID_VALUE;
9622 }
9623
9624 unbind:
9625 thread_bind(processor);
9626
9627 thread_block(THREAD_CONTINUE_NULL);
9628 return KERN_SUCCESS;
9629 }
9630
9631 #if __AMP__
9632
9633 static char
9634 pset_cluster_type_to_name_char(pset_cluster_type_t pset_type)
9635 {
9636 switch (pset_type) {
9637 case PSET_AMP_E:
9638 return 'E';
9639 case PSET_AMP_P:
9640 return 'P';
9641 default:
9642 panic("Unexpected AMP pset cluster type %d", pset_type);
9643 }
9644 }
9645
9646 #endif /* __AMP__ */
9647
9648 extern char sysctl_get_task_cluster_type(void);
9649 char
9650 sysctl_get_task_cluster_type(void)
9651 {
9652 #if __AMP__
9653 task_t task = current_task();
9654 processor_set_t pset_hint = task->pset_hint;
9655
9656 if (!pset_hint) {
9657 return '0';
9658 }
9659 return pset_cluster_type_to_name_char(pset_hint->pset_cluster_type);
9660 #else /* !__AMP__ */
9661 return '0';
9662 #endif /* __AMP__ */
9663 }
9664
9665 #if __AMP__
9666 extern char sysctl_get_bound_cluster_type(void);
9667 char
9668 sysctl_get_bound_cluster_type(void)
9669 {
9670 thread_t self = current_thread();
9671
9672 if (self->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) {
9673 return '0';
9674 }
9675 pset_cluster_type_t pset_type = pset_array[self->th_bound_cluster_id]->pset_cluster_type;
9676 return pset_cluster_type_to_name_char(pset_type);
9677 }
9678
9679 static processor_set_t
9680 find_pset_of_type(pset_cluster_type_t t)
9681 {
9682 for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
9683 if (node->pset_cluster_type != t) {
9684 continue;
9685 }
9686
9687 processor_set_t pset = PROCESSOR_SET_NULL;
9688 for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
9689 pset = pset_array[pset_id];
9690 /* Prefer one with recommended processors */
9691 if (pset_is_recommended(pset)) {
9692 assert(pset->pset_cluster_type == t);
9693 return pset;
9694 }
9695 }
9696 /* Otherwise return whatever was found last */
9697 return pset;
9698 }
9699
9700 return PROCESSOR_SET_NULL;
9701 }
9702 #endif /* __AMP__ */
9703
9704 extern kern_return_t sysctl_task_set_cluster_type(char cluster_type);
9705 kern_return_t
9706 sysctl_task_set_cluster_type(char cluster_type)
9707 {
9708 #if __AMP__
9709 kern_return_t kr;
9710 task_t task = current_task();
9711 pset_cluster_type_t pset_cluster_type;
9712 kr = pset_cluster_type_from_name_char(cluster_type, &pset_cluster_type);
9713 if (kr == KERN_SUCCESS) {
9714 processor_set_t pset_hint = find_pset_of_type(pset_cluster_type);
9715 if (pset_hint) {
9716 task_lock(task);
9717 task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
9718 task->pset_hint = pset_hint;
9719 task_unlock(task);
9720
9721 thread_block(THREAD_CONTINUE_NULL);
9722 return KERN_SUCCESS;
9723 }
9724 }
9725 return KERN_INVALID_ARGUMENT;
9726 #else
9727 (void)cluster_type;
9728 return KERN_SUCCESS;
9729 #endif
9730 }
9731
9732 extern kern_return_t sysctl_clutch_thread_group_cpu_time_for_thread(thread_t thread,
9733 int sched_bucket, uint64_t *cpu_stats);
9734
9735 #if CONFIG_SCHED_CLUTCH
9736
9737 kern_return_t
9738 sysctl_clutch_thread_group_cpu_time_for_thread(thread_t thread,
9739 int sched_bucket, uint64_t *cpu_stats)
9740 {
9741 return sched_clutch_thread_group_cpu_time_for_thread(thread, sched_bucket, cpu_stats);
9742 }
9743
9744 #else /* !CONFIG_SCHED_CLUTCH */
9745
9746 kern_return_t
9747 sysctl_clutch_thread_group_cpu_time_for_thread(__unused thread_t thread,
9748 __unused int sched_bucket, __unused uint64_t *cpu_stats)
9749 {
9750 return KERN_NOT_SUPPORTED;
9751 }
9752
9753 #endif /* !CONFIG_SCHED_CLUTCH */
9754
9755 #endif /* DEVELOPMENT || DEBUG */
9756