/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Scheduling primitives
 *
 */

#include <debug.h>

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/limits.h>
#include <machine/atomic.h>

#include <machine/commpage.h>

#include <kern/kern_types.h>
#include <kern/backtrace.h>
#include <kern/clock.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/smp.h>
#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/monotonic.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/recount.h>
#include <kern/restartable.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/ledger.h>
#include <kern/timer_queue.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout_xnu.h>

#include <mach/sdt.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <kern/kpc.h>
#include <san/kasan.h>
#include <kern/pms.h>
#include <kern/host.h>
#include <stdatomic.h>
#include <os/atomic_private.h>
#include <os/log.h>

#ifdef KDBG_MACOS_RELEASE
#define KTRC KDBG_MACOS_RELEASE
#else
#define KTRC KDBG_RELEASE
#endif


struct sched_statistics PERCPU_DATA(sched_stats);
bool sched_stats_active;

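/*
 * Add an offset to an absolute-time deadline, saturating at UINT64_MAX
 * instead of wrapping (e.g. deadline_add(UINT64_MAX - 1, 10) == UINT64_MAX).
 */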
static uint64_t
deadline_add(uint64_t d, uint64_t e)
{
	uint64_t sum;
	return os_add_overflow(d, e, &sum) ? UINT64_MAX : sum;
}

int
rt_runq_count(processor_set_t pset)
{
	return os_atomic_load(&SCHED(rt_runq)(pset)->count, relaxed);
}

uint64_t
rt_runq_earliest_deadline(processor_set_t pset)
{
	return os_atomic_load_wide(&SCHED(rt_runq)(pset)->earliest_deadline, relaxed);
}

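/*
 * Returns the highest RT priority with runnable threads as a sched_pri
 * (bitmap index i corresponds to priority i + BASEPRI_RTQUEUES), or -1
 * if the RT run queue is empty.
 */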
static int
rt_runq_priority(processor_set_t pset)
{
	pset_assert_locked(pset);
	rt_queue_t rt_run_queue = SCHED(rt_runq)(pset);

	bitmap_t *map = rt_run_queue->bitmap;
	int i = bitmap_first(map, NRTQS);
	assert(i < NRTQS);

	if (i >= 0) {
		return i + BASEPRI_RTQUEUES;
	}

	return i;
}

static thread_t rt_runq_first(rt_queue_t rt_runq);

#if DEBUG
static void
check_rt_runq_consistency(rt_queue_t rt_run_queue, thread_t thread)
{
	bitmap_t *map = rt_run_queue->bitmap;

	uint64_t earliest_deadline = RT_DEADLINE_NONE;
	uint32_t constraint = RT_CONSTRAINT_NONE;
	int ed_index = NOPRI;
	int count = 0;
	bool found_thread = false;

	for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
		int i = pri - BASEPRI_RTQUEUES;
		rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
		queue_t queue = &rt_runq->pri_queue;
		queue_entry_t iter;
		int n = 0;
		uint64_t previous_deadline = 0;
		qe_foreach(iter, queue) {
			thread_t iter_thread = qe_element(iter, struct thread, runq_links);
			assert_thread_magic(iter_thread);
			if (iter_thread == thread) {
				found_thread = true;
			}
			assert(iter_thread->sched_pri == (i + BASEPRI_RTQUEUES));
			assert(iter_thread->realtime.deadline < RT_DEADLINE_NONE);
			assert(iter_thread->realtime.constraint < RT_CONSTRAINT_NONE);
			assert(previous_deadline <= iter_thread->realtime.deadline);
			n++;
			if (iter == queue_first(queue)) {
				assert(rt_runq->pri_earliest_deadline == iter_thread->realtime.deadline);
				assert(rt_runq->pri_constraint == iter_thread->realtime.constraint);
			}
			previous_deadline = iter_thread->realtime.deadline;
		}
		assert(n == rt_runq->pri_count);
		if (n == 0) {
			assert(bitmap_test(map, i) == false);
			assert(rt_runq->pri_earliest_deadline == RT_DEADLINE_NONE);
			assert(rt_runq->pri_constraint == RT_CONSTRAINT_NONE);
		} else {
			assert(bitmap_test(map, i) == true);
		}
		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
			earliest_deadline = rt_runq->pri_earliest_deadline;
			constraint = rt_runq->pri_constraint;
			ed_index = i;
		}
		count += n;
	}
	assert(os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed) == earliest_deadline);
	assert(os_atomic_load(&rt_run_queue->count, relaxed) == count);
	assert(os_atomic_load(&rt_run_queue->constraint, relaxed) == constraint);
	assert(os_atomic_load(&rt_run_queue->ed_index, relaxed) == ed_index);
	if (thread) {
		assert(found_thread);
	}
}
#define CHECK_RT_RUNQ_CONSISTENCY(q, th)    check_rt_runq_consistency(q, th)
#else
#define CHECK_RT_RUNQ_CONSISTENCY(q, th)    do {} while (0)
#endif

uint32_t rt_constraint_threshold;

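/*
 * True when the tightest constraint among queued RT threads is at or below
 * rt_constraint_threshold (4 ms by default; see
 * sched_realtime_timebase_init()), i.e. the queue holds latency-sensitive
 * work.
 */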
static bool
rt_runq_is_low_latency(processor_set_t pset)
{
	return os_atomic_load(&SCHED(rt_runq)(pset)->constraint, relaxed) <= rt_constraint_threshold;
}

TUNABLE(bool, cpulimit_affects_quantum, "cpulimit_affects_quantum", true);

/* TODO: enable this, to 50us (less than the deferred IPI latency, to beat a spill) */
TUNABLE(uint32_t, nonurgent_preemption_timer_us, "nonurgent_preemption_timer", 0); /* microseconds */
static uint64_t nonurgent_preemption_timer_abs = 0;

#define         DEFAULT_PREEMPTION_RATE         100             /* (1/s) */
TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);

#define         DEFAULT_BG_PREEMPTION_RATE      400             /* (1/s) */
TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);

#if XNU_TARGET_OS_XR
#define         MAX_UNSAFE_RT_QUANTA               1
#define         SAFE_RT_MULTIPLIER                 5
#else
#define         MAX_UNSAFE_RT_QUANTA               100
#define         SAFE_RT_MULTIPLIER                 2
#endif /* XNU_TARGET_OS_XR */

#define         MAX_UNSAFE_FIXED_QUANTA               100
#define         SAFE_FIXED_MULTIPLIER                 SAFE_RT_MULTIPLIER

TUNABLE_DEV_WRITEABLE(int, max_unsafe_rt_quanta, "max_unsafe_rt_quanta", MAX_UNSAFE_RT_QUANTA);
TUNABLE_DEV_WRITEABLE(int, max_unsafe_fixed_quanta, "max_unsafe_fixed_quanta", MAX_UNSAFE_FIXED_QUANTA);

TUNABLE_DEV_WRITEABLE(int, safe_rt_multiplier, "safe_rt_multiplier", SAFE_RT_MULTIPLIER);
TUNABLE_DEV_WRITEABLE(int, safe_fixed_multiplier, "safe_fixed_multiplier", SAFE_FIXED_MULTIPLIER);

#define         MAX_POLL_QUANTA                 2
TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);

#define         SCHED_POLL_YIELD_SHIFT          4               /* 1/16 */
int             sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t        max_poll_computation;

uint64_t        max_unsafe_rt_computation;
uint64_t        max_unsafe_fixed_computation;
uint64_t        sched_safe_rt_duration;
uint64_t        sched_safe_fixed_duration;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

uint32_t        std_quantum;
uint32_t        min_std_quantum;
uint32_t        bg_quantum;

uint32_t        std_quantum_us;
uint32_t        bg_quantum_us;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

uint32_t        thread_depress_time;
uint32_t        default_timeshare_computation;
uint32_t        default_timeshare_constraint;

uint32_t        max_rt_quantum;
uint32_t        min_rt_quantum;

uint32_t        rt_deadline_epsilon;

uint32_t        rt_constraint_threshold;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

unsigned                sched_tick;
uint32_t                sched_tick_interval;

/* Timeshare load calculation interval (15ms) */
uint32_t                sched_load_compute_interval_us = 15000;
uint64_t                sched_load_compute_interval_abs;
static _Atomic uint64_t sched_load_compute_deadline;

uint32_t        sched_pri_shifts[TH_BUCKET_MAX];
uint32_t        sched_fixed_shift;

uint32_t        sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */

/* Allow foreground to decay past default to resolve inversions */
#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
int             sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;

/* Defaults for timer deadline profiling */
#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <= 2ms */
#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines <= 5ms */

uint64_t timer_deadline_tracking_bin_1;
uint64_t timer_deadline_tracking_bin_2;

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

thread_t sched_maintenance_thread;

LCK_GRP_DECLARE(cluster_powerdown_grp, "cluster_powerdown");
LCK_MTX_DECLARE(cluster_powerdown_lock, &cluster_powerdown_grp);

/* interrupts-disabled lock to guard core online, recommendation, and PCS state */
decl_simple_lock_data(, sched_available_cores_lock);

/*
 * Locked by sched_available_cores_lock.
 * cluster_powerdown_lock is held while making changes to CPU offline state.
 */
static struct global_powered_cores_state {
	/*
	 * Set when PCS has seen all cores boot up and is ready to manage online
	 * state.  CPU recommendation works before this point.
	 */
	bool    pcs_init_completed;

	cpumap_t pcs_managed_cores;         /* all cores managed by the PCS */

	/*
	 * Inputs for CPU offline state provided by clients
	 */
	cpumap_t pcs_requested_online_user; /* updated by processor_start/exit from userspace */
	cpumap_t pcs_requested_online_clpc_user;
	cpumap_t pcs_requested_online_clpc_system;
	cpumap_t pcs_required_online_pmgr;  /* e.g. ANE needs these powered for their rail to be happy */
	cpumap_t pcs_required_online_system;  /* e.g. smt1 for interrupts, boot processor unless boot arg is set, makes them disable instead of sleep */

	/*
	 * When a suspend count is held, all CPUs must be powered up.
	 */
	int32_t  pcs_powerdown_suspend_count;

	/*
	 * Disable automatic cluster powerdown in favor of explicit user core online control
	 */
	bool     pcs_user_online_core_control;
	bool     pcs_wants_kernel_sleep;
	bool     pcs_in_kernel_sleep;

	struct powered_cores_state {
		/*
		 * The input into the recommendation computation from update powered cores.
		 */
		cpumap_t pcs_powerdown_recommended_cores;

		/*
		 * These cores are online and are not powered down.
		 *
		 * Processors with processor->processor_online bit set.
		 */
		cpumap_t pcs_online_cores;

		/*
		 * These cores are disabled or powered down
		 * due to temporary reasons and will come back under presented load,
		 * so the user should still see them as active in the cpu count.
		 *
		 * Processors with processor->shutdown_temporary bit set.
		 */
		cpumap_t pcs_tempdown_cores;
	} pcs_effective;

	/* The 'goal state' PCS has computed and is attempting to apply */
	struct powered_cores_state pcs_requested;

	/*
	 * Inputs into CPU recommended cores provided by clients.
	 * Note that these may be changed under the available cores lock and
	 * become effective while sched_update_powered_cores_drops_lock is in
	 * the middle of making changes to CPU online state.
	 */

	cpumap_t        pcs_requested_recommended_clpc;
	cpumap_t        pcs_requested_recommended_clpc_system;
	cpumap_t        pcs_requested_recommended_clpc_user;
	bool            pcs_recommended_clpc_failsafe_active;
	bool            pcs_sleep_override_recommended;

	/*
	 * These cores are recommended and can be used for execution
	 * of non-bound threads.
	 *
	 * Processors with processor->is_recommended bit set.
	 */
	cpumap_t pcs_recommended_cores;

	/*
	 * These are for the debugger.
	 * Use volatile to stop the compiler from optimizing out the stores.
	 */
	volatile processor_reason_t pcs_in_flight_reason;
	volatile processor_reason_t pcs_previous_reason;
} pcs = {
	/*
	 * Powerdown is suspended during boot until after all CPUs finish booting,
	 * released by sched_cpu_init_completed.
	 */
	.pcs_powerdown_suspend_count = 1,
	.pcs_requested_online_user = ALL_CORES_POWERED,
	.pcs_requested_online_clpc_user = ALL_CORES_POWERED,
	.pcs_requested_online_clpc_system = ALL_CORES_POWERED,
	.pcs_in_flight_reason = REASON_NONE,
	.pcs_previous_reason = REASON_NONE,
	.pcs_requested.pcs_powerdown_recommended_cores = ALL_CORES_POWERED,
	.pcs_requested_recommended_clpc = ALL_CORES_RECOMMENDED,
	.pcs_requested_recommended_clpc_system = ALL_CORES_RECOMMENDED,
	.pcs_requested_recommended_clpc_user = ALL_CORES_RECOMMENDED,
};

uint64_t sysctl_sched_recommended_cores = ALL_CORES_RECOMMENDED;

static int sched_last_resort_cpu(void);

static void sched_update_recommended_cores_locked(processor_reason_t reason, cpumap_t core_going_offline);
static void sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t s);

#if __arm64__
static void sched_recommended_cores_maintenance(void);
uint64_t    perfcontrol_failsafe_starvation_threshold;
extern char *proc_name_address(struct proc *p);
#endif /* __arm64__ */

uint64_t        sched_one_second_interval;
boolean_t       allow_direct_handoff = TRUE;

/* Forwards */

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

static void load_shift_init(void);
static void preempt_pri_init(void);

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

thread_t        processor_idle(
	thread_t                        thread,
	processor_t                     processor);

static ast_t
csw_check_locked(
	thread_t        thread,
	processor_t     processor,
	processor_set_t pset,
	ast_t           check_reason);

static void processor_setrun(
	processor_t                    processor,
	thread_t                       thread,
	integer_t                      options);

static void
sched_realtime_timebase_init(void);

static void
sched_timer_deadline_tracking_init(void);

#if     DEBUG
extern int debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif

static processor_t
thread_bind_internal(
	thread_t                thread,
	processor_t             processor);

static void
sched_vm_group_maintenance(void);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
int8_t          sched_load_shifts[NRQS];
bitmap_t        sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#define cpumap_foreach(cpu_id, cpumap) \
	for (int cpu_id = lsb_first(cpumap); \
	    (cpu_id) >= 0; \
	     cpu_id = lsb_next((cpumap), cpu_id))

#define foreach_node(node) \
	for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list)

#define foreach_pset_id(pset_id, node) \
	for (int pset_id = lsb_first((node)->pset_map); \
	    pset_id >= 0; \
	    pset_id = lsb_next((node)->pset_map, pset_id))

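/*
 * Usage sketch for the iteration macros above: walk every recommended CPU,
 * or every pset of every node:
 *	cpumap_foreach(cpu_id, pcs.pcs_recommended_cores) { ... }
 *	foreach_node(node) {
 *		foreach_pset_id(pset_id, node) { ... }
 *	}
 */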
/*
 * Statically allocate a buffer to hold the longest possible
 * scheduler description string, as currently implemented.
 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
 * to export to userspace via sysctl(3). If either version
 * changes, update the other.
 *
 * Note that in addition to being an upper bound on the strings
 * in the kernel, it's also an exact parameter to PE_get_default(),
 * which interrogates the device tree on some platforms. That
 * API requires the caller know the exact size of the device tree
 * property, so we need both a legacy size (32) and the current size
 * (48) to deal with old and new device trees. The device tree property
 * is similarly padded to a fixed size so that the same kernel image
 * can run on multiple devices with different schedulers configured
 * in the device tree.
 */
char sched_string[SCHED_STRING_MAX_LENGTH];

uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;

/* Global flag which indicates whether Background Stepper Context is enabled */
static int cpu_throttle_enabled = 1;

#if DEVELOPMENT || DEBUG
int enable_task_set_cluster_type = 0;
bool system_ecore_only = false;
#endif /* DEVELOPMENT || DEBUG */

void
sched_init(void)
{
	boolean_t direct_handoff = FALSE;
	kprintf("Scheduler: Default of %s\n", SCHED(sched_name));

	if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
		/* No boot-args, check in device tree */
		if (!PE_get_default("kern.sched_pri_decay_limit",
		    &sched_pri_decay_band_limit,
		    sizeof(sched_pri_decay_band_limit))) {
			/* Allow decay all the way to normal limits */
			sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
		}
	}

	kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);

	if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
		kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
	}
	strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));

#if __arm64__
	clock_interval_to_absolutetime_interval(expecting_ipi_wfe_timeout_usec, NSEC_PER_USEC, &expecting_ipi_wfe_timeout_mt);
#endif /* __arm64__ */

	SCHED(init)();
	SCHED(rt_init)(&pset0);
	sched_timer_deadline_tracking_init();

	SCHED(pset_init)(&pset0);
	SCHED(processor_init)(master_processor);

	if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
		allow_direct_handoff = direct_handoff;
	}

#if DEVELOPMENT || DEBUG
	if (PE_parse_boot_argn("enable_skstsct", &enable_task_set_cluster_type, sizeof(enable_task_set_cluster_type))) {
		system_ecore_only = (enable_task_set_cluster_type == 2);
	}
#endif /* DEVELOPMENT || DEBUG */
}

void
sched_timebase_init(void)
{
	uint64_t        abstime;

	clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
	sched_one_second_interval = abstime;

	SCHED(timebase_init)();
	sched_realtime_timebase_init();
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

void
sched_timeshare_init(void)
{
	/*
	 * Calculate the timeslicing quantum
	 * in us.
	 */
	if (default_preemption_rate < 1) {
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	}
	std_quantum_us = (1000 * 1000) / default_preemption_rate;
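	/* e.g. the default preemption rate of 100/sec yields a 10 ms standard quantum */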

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	if (default_bg_preemption_rate < 1) {
		default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
	}
	bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

	printf("standard background quantum is %d us\n", bg_quantum_us);

	load_shift_init();
	preempt_pri_init();
	sched_tick = 0;
}

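/*
 * Convert the unsafe-quanta tunables into absolute-time budgets for the
 * scheduler fail-safe.  Worked example (assuming the default 10 ms standard
 * quantum): max_unsafe_rt_quanta = 100 gives a realtime thread roughly 1 s
 * of continuous computation before the fail-safe trips, and with
 * safe_rt_multiplier = 2 the safe duration is roughly 2 s.
 */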
void
sched_set_max_unsafe_rt_quanta(int max)
{
	const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);

	max_unsafe_rt_computation = ((uint64_t)max) * quantum_size;

	const int mult = safe_rt_multiplier <= 0 ? 2 : safe_rt_multiplier;
	sched_safe_rt_duration = mult * ((uint64_t)max) * quantum_size;


#if DEVELOPMENT || DEBUG
	max_unsafe_rt_quanta = max;
#else
	/*
	 * On RELEASE kernels, this is only called on boot where
	 * max is already equal to max_unsafe_rt_quanta.
	 */
	assert3s(max, ==, max_unsafe_rt_quanta);
#endif
}

void
sched_set_max_unsafe_fixed_quanta(int max)
{
	const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);

	max_unsafe_fixed_computation = ((uint64_t)max) * quantum_size;

	const int mult = safe_fixed_multiplier <= 0 ? 2 : safe_fixed_multiplier;
	sched_safe_fixed_duration = mult * ((uint64_t)max) * quantum_size;

#if DEVELOPMENT || DEBUG
	max_unsafe_fixed_quanta = max;
#else
	/*
	 * On RELEASE kernels, this is only called on boot where
	 * max is already equal to max_unsafe_fixed_quanta.
	 */
	assert3s(max, ==, max_unsafe_fixed_quanta);
#endif
}

uint64_t
sched_get_quantum_us(void)
{
	uint32_t quantum = SCHED(initial_quantum_size)(THREAD_NULL);

	uint64_t quantum_ns;
	absolutetime_to_nanoseconds(quantum, &quantum_ns);

	return quantum_ns / 1000;
}

void
sched_timeshare_timebase_init(void)
{
	uint64_t        abstime;
	uint32_t        shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
		std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = (uint32_t)abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = (uint32_t)abstime;

	/* quantum for background tasks */
	clock_interval_to_absolutetime_interval(
		bg_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	bg_quantum = (uint32_t)abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
	    NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_tick_interval = (uint32_t)abstime;

	/* timeshare load calculation interval & deadline initialization */
	clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
	os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
		abstime >>= 1;
	}
	sched_fixed_shift = shift;

	for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
		sched_pri_shifts[i] = INT8_MAX;
	}

	sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
	sched_set_max_unsafe_fixed_quanta(max_unsafe_fixed_quanta);

	max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
	thread_depress_time = 1 * std_quantum;
	default_timeshare_computation = std_quantum / 2;
	default_timeshare_constraint = std_quantum;

#if __arm64__
	perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
#endif /* __arm64__ */

	if (nonurgent_preemption_timer_us) {
		clock_interval_to_absolutetime_interval(nonurgent_preemption_timer_us, NSEC_PER_USEC, &abstime);
		nonurgent_preemption_timer_abs = abstime;
	}
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

void
pset_rt_init(processor_set_t pset)
{
	for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
		int i = pri - BASEPRI_RTQUEUES;
		rt_queue_pri_t *rqi = &pset->rt_runq.rt_queue_pri[i];
		queue_init(&rqi->pri_queue);
		rqi->pri_count = 0;
		rqi->pri_earliest_deadline = RT_DEADLINE_NONE;
		rqi->pri_constraint = RT_CONSTRAINT_NONE;
	}
	os_atomic_init(&pset->rt_runq.count, 0);
	os_atomic_init(&pset->rt_runq.earliest_deadline, RT_DEADLINE_NONE);
	os_atomic_init(&pset->rt_runq.constraint, RT_CONSTRAINT_NONE);
	os_atomic_init(&pset->rt_runq.ed_index, NOPRI);
	memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
}

/* epsilon for comparing RT deadlines */
int rt_deadline_epsilon_us = 100;

int
sched_get_rt_deadline_epsilon(void)
{
	return rt_deadline_epsilon_us;
}

void
sched_set_rt_deadline_epsilon(int new_epsilon_us)
{
	rt_deadline_epsilon_us = new_epsilon_us;

	uint64_t abstime;
	clock_interval_to_absolutetime_interval(rt_deadline_epsilon_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && ((rt_deadline_epsilon_us == 0) || (uint32_t)abstime != 0));
	rt_deadline_epsilon = (uint32_t)abstime;
}

static void
sched_realtime_timebase_init(void)
{
	uint64_t abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = (uint32_t)abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
		50, 1000 * NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = (uint32_t)abstime;

	/* constraint threshold for sending backup IPIs (4 ms) */
	clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	rt_constraint_threshold = (uint32_t)abstime;

	/* epsilon for comparing deadlines */
	sched_set_rt_deadline_epsilon(rt_deadline_epsilon_us);
}

void
sched_check_spill(processor_set_t pset, thread_t thread)
{
	(void)pset;
	(void)thread;

	return;
}

bool
sched_thread_should_yield(processor_t processor, thread_t thread)
{
	(void)thread;

	return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
}

/* Default implementations of .steal_thread_enabled */
bool
sched_steal_thread_DISABLED(processor_set_t pset)
{
	(void)pset;
	return false;
}

bool
sched_steal_thread_enabled(processor_set_t pset)
{
	return bit_count(pset->node->pset_map) > 1;
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
	int8_t          k, *p = sched_load_shifts;
	uint32_t        i, j;

	uint32_t        sched_decay_penalty = 1;

	if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
		kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
	}

	if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
		kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
	}

	if (sched_decay_penalty == 0) {
		/*
		 * There is no penalty for timeshare threads for using too much
		 * CPU, so set all load shifts to INT8_MIN. Even under high load,
		 * sched_pri_shift will be >INT8_MAX, and there will be no
		 * penalty applied to threads (nor will sched_usage be updated per
		 * thread).
		 */
		for (i = 0; i < NRQS; i++) {
			sched_load_shifts[i] = INT8_MIN;
		}

		return;
	}

	*p++ = INT8_MIN; *p++ = 0;

	/*
	 * For a given system load "i", the per-thread priority
	 * penalty per quantum of CPU usage is ~2^k priority
	 * levels. "sched_decay_penalty" can cause more
	 * array entries to be filled with smaller "k" values
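	 * (e.g. with the default sched_decay_penalty of 1, the table works
	 * out to INT8_MIN, 0, 1, 1, 2, 2, 2, 2, 3, ... -- roughly log2(load)
	 * priority levels of penalty per quantum of usage).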
	 */
	for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
		for (j <<= 1; (i < j) && (i < NRQS); ++i) {
			*p++ = k;
		}
	}
}

static void
preempt_pri_init(void)
{
	bitmap_t *p = sched_preempt_pri;

	for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
		bitmap_set(p, i);
	}

	for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
		bitmap_set(p, i);
	}
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

void
check_monotonic_time(uint64_t ctime)
{
	processor_t processor = current_processor();
	uint64_t last_dispatch = processor->last_dispatch;

	if (last_dispatch > ctime) {
		panic("Non-monotonic time: last_dispatch at 0x%llx, ctime 0x%llx",
		    last_dispatch, ctime);
	}
}


/*
 *	Thread wait timer expiration.
 *	Runs in timer interrupt context with interrupts disabled.
 */
void
thread_timer_expire(void *p0, __unused void *p1)
{
	thread_t thread = (thread_t)p0;

	assert_thread_magic(thread);

	assert(ml_get_interrupts_enabled() == FALSE);

	thread_lock(thread);

	if (thread->wait_timer_armed) {
		thread->wait_timer_armed = false;
		clear_wait_internal(thread, THREAD_TIMED_OUT);
		/* clear_wait_internal may have dropped and retaken the thread lock */
	}

	thread->wait_timer_active--;

	thread_unlock(thread);
}

/*
 *	thread_unblock:
 *
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread should now be placed on the runqueue.
 *
 *	Thread must be locked.
 *
 *	Called at splsched().
 */
boolean_t
thread_unblock(
	thread_t                thread,
	wait_result_t   wresult)
{
	boolean_t               ready_for_runq = FALSE;
	thread_t                cthread = current_thread();
	uint32_t                new_run_count;
	int                     old_thread_state;

	/*
	 *	Set wait_result.
	 */
	thread->wait_result = wresult;

	/*
	 *	Cancel pending wait timer.
	 */
	if (thread->wait_timer_armed) {
		if (timer_call_cancel(thread->wait_timer)) {
			thread->wait_timer_active--;
		}
		thread->wait_timer_armed = false;
	}

	boolean_t aticontext, pidle;
	ml_get_power_state(&aticontext, &pidle);

	/*
	 *	Update scheduling state: not waiting,
	 *	set running.
	 */
	old_thread_state = thread->state;
	thread->state = (old_thread_state | TH_RUN) &
	    ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT | TH_WAKING);

	if ((old_thread_state & TH_RUN) == 0) {
		uint64_t ctime = mach_approximate_time();

		check_monotonic_time(ctime);

		thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
		timer_start(&thread->runnable_timer, ctime);

		ready_for_runq = TRUE;

		if (old_thread_state & TH_WAIT_REPORT) {
			(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
		}

		/* Update the runnable thread count */
		new_run_count = SCHED(run_count_incr)(thread);

#if CONFIG_SCHED_AUTO_JOIN
		if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
			work_interval_auto_join_propagate(cthread, thread);
		}
#endif /*CONFIG_SCHED_AUTO_JOIN */

	} else {
		/*
		 * Either the thread is idling in place on another processor,
		 * or it hasn't finished context switching yet.
		 */
		assert((thread->state & TH_IDLE) == 0);
		/*
		 * The run count is only dropped after the context switch completes
		 * and the thread is still waiting, so we should not run_incr here
		 */
		new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
	}

	/*
	 * Calculate deadline for real-time threads.
	 */
	if (thread->sched_mode == TH_MODE_REALTIME) {
		uint64_t ctime = mach_absolute_time();
		thread->realtime.deadline = thread->realtime.constraint + ctime;
		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
		    (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
	}

	/*
	 * Clear old quantum, fail-safe computation, etc.
	 */
	thread->quantum_remaining = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;
	thread->block_hint = kThreadWaitNone;

	/* Obtain power-relevant interrupt and "platform-idle exit" statistics.
	 * We also account for "double hop" thread signaling via
	 * the thread callout infrastructure.
	 * DRK: consider removing the callout wakeup counters in the future;
	 * they're present for verification at the moment.
	 */

	if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
		DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, current_proc());

		uint64_t ttd = current_processor()->timer_call_ttd;

		if (ttd) {
			if (ttd <= timer_deadline_tracking_bin_1) {
				thread->thread_timer_wakeups_bin_1++;
			} else if (ttd <= timer_deadline_tracking_bin_2) {
				thread->thread_timer_wakeups_bin_2++;
			}
		}

		ledger_credit_thread(thread, thread->t_ledger,
		    task_ledgers.interrupt_wakeups, 1);
		if (pidle) {
			ledger_credit_thread(thread, thread->t_ledger,
			    task_ledgers.platform_idle_wakeups, 1);
		}
	} else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
		/* TODO: what about an interrupt that does a wake taken on a callout thread? */
		if (cthread->callout_woken_from_icontext) {
			ledger_credit_thread(thread, thread->t_ledger,
			    task_ledgers.interrupt_wakeups, 1);
			thread->thread_callout_interrupt_wakeups++;

			if (cthread->callout_woken_from_platform_idle) {
				ledger_credit_thread(thread, thread->t_ledger,
				    task_ledgers.platform_idle_wakeups, 1);
				thread->thread_callout_platform_idle_wakeups++;
			}

			cthread->callout_woke_thread = TRUE;
		}
	}

	if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
		thread->callout_woken_from_icontext = !!aticontext;
		thread->callout_woken_from_platform_idle = !!pidle;
		thread->callout_woke_thread = FALSE;
	}

#if KPERF
	if (ready_for_runq) {
		kperf_make_runnable(thread, aticontext);
	}
#endif /* KPERF */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
	    (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
	    sched_run_buckets[TH_BUCKET_RUN], 0);

	DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, current_proc());

	return ready_for_runq;
}

/*
 *	Routine:	thread_allowed_for_handoff
 *	Purpose:
 *		Check if the thread is allowed for handoff operation
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *	TODO: In future, do not allow handoff if threads have different cluster
 *	recommendations.
 */
boolean_t
thread_allowed_for_handoff(
	thread_t         thread)
{
	thread_t self = current_thread();

	if (allow_direct_handoff &&
	    thread->sched_mode == TH_MODE_REALTIME &&
	    self->sched_mode == TH_MODE_REALTIME) {
		return TRUE;
	}

	return FALSE;
}

/*
 *	Routine:	thread_go
 *	Purpose:
 *		Unblock and dispatch thread.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been waiting
 */
void
thread_go(
	thread_t                thread,
	wait_result_t           wresult,
	bool                    try_handoff)
{
	thread_t self = current_thread();

	assert_thread_magic(thread);

	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(waitq_is_null(thread->waitq));

	assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
	assert(thread->state & TH_WAIT);

	if (thread->started) {
		assert(thread->state & TH_WAKING);
	}

	thread_lock_assert(thread, LCK_ASSERT_OWNED);

	assert(ml_get_interrupts_enabled() == false);

	if (thread_unblock(thread, wresult)) {
#if SCHED_TRACE_THREAD_WAKEUPS
		backtrace(&thread->thread_wakeup_bt[0],
		    (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL,
		    NULL);
#endif /* SCHED_TRACE_THREAD_WAKEUPS */
		if (try_handoff && thread_allowed_for_handoff(thread)) {
			thread_reference(thread);
			assert(self->handoff_thread == NULL);
			self->handoff_thread = thread;

			/*
			 * A TH_RUN'ed thread must have a chosen_processor.
			 * thread_setrun would have set it, so we need to
			 * replicate that here.
			 */
			thread->chosen_processor = current_processor();
		} else {
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		}
	}
}

/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
	thread_t                        thread,
	wait_interrupt_t        interruptible_orig)
{
	boolean_t                       at_safe_point;
	wait_interrupt_t        interruptible = interruptible_orig;

	if (thread->state & TH_IDLE) {
		panic("Invalid attempt to wait while running the idle thread");
	}

	assert(!(thread->state & (TH_WAIT | TH_WAKING | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
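	 *	For example, a thread whose option mask is THREAD_UNINT has any
	 *	THREAD_ABORTSAFE wait demoted to an uninterruptible wait below.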
	 */
	interruptible &= TH_OPT_INTMASK;
	if (interruptible > (thread->options & TH_OPT_INTMASK)) {
		interruptible = thread->options & TH_OPT_INTMASK;
	}

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (interruptible == THREAD_UNINT ||
	    !(thread->sched_flags & TH_SFLAG_ABORT) ||
	    (!at_safe_point &&
	    (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
		if (!(thread->state & TH_TERMINATE)) {
			DTRACE_SCHED(sleep);
		}

		int state_bits = TH_WAIT;
		if (!interruptible) {
			state_bits |= TH_UNINT;
		}
		if (thread->sched_call) {
			wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
			if (is_kerneltask(get_threadtask(thread))) {
				mask = THREAD_WAIT_NOREPORT_KERNEL;
			}
			if ((interruptible_orig & mask) == 0) {
				state_bits |= TH_WAIT_REPORT;
			}
		}
		thread->state |= state_bits;
		thread->at_safe_point = at_safe_point;

		/* TODO: pass this through assert_wait instead, have
		 * assert_wait just take a struct as an argument */
		assert(!thread->block_hint);
		thread->block_hint = thread->pending_block_hint;
		thread->pending_block_hint = kThreadWaitNone;

		return thread->wait_result = THREAD_WAITING;
	} else {
		if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
			thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
	}
	thread->pending_block_hint = kThreadWaitNone;

	return thread->wait_result = THREAD_INTERRUPTED;
}

/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *	        Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
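 *
 *	Typical usage (sketch):
 *		wait_interrupt_t save = thread_interrupt_level(THREAD_UNINT);
 *		... call code that may assert_wait ...
 *		thread_interrupt_level(save);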
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t thread = current_thread();
	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

	return result;
}

/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t                         event,
	wait_interrupt_t        interruptible)
{
	if (__improbable(event == NO_EVENT)) {
		panic("%s() called with NO_EVENT", __func__);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);

	struct waitq *waitq;
	waitq = global_eventq(event);
	return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
}

/*
 *	assert_wait_queue:
 *
 *	Return the global waitq for the specified event
 */
struct waitq *
assert_wait_queue(
	event_t                         event)
{
	return global_eventq(event);
}

wait_result_t
assert_wait_timeout(
	event_t                         event,
	wait_interrupt_t        interruptible,
	uint32_t                        interval,
	uint32_t                        scale_factor)
{
	thread_t                        thread = current_thread();
	wait_result_t           wresult;
	uint64_t                        deadline;
	spl_t                           s;

	if (__improbable(event == NO_EVENT)) {
		panic("%s() called with NO_EVENT", __func__);
	}

	struct waitq *waitq;
	waitq = global_eventq(event);

	s = splsched();
	waitq_lock(waitq);

	clock_interval_to_deadline(interval, scale_factor, &deadline);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    deadline, TIMEOUT_NO_LEEWAY,
	    thread);

	waitq_unlock(waitq);
	splx(s);
	return wresult;
}

wait_result_t
assert_wait_timeout_with_leeway(
	event_t                         event,
	wait_interrupt_t        interruptible,
	wait_timeout_urgency_t  urgency,
	uint32_t                        interval,
	uint32_t                        leeway,
	uint32_t                        scale_factor)
{
	thread_t                        thread = current_thread();
	wait_result_t           wresult;
	uint64_t                        deadline;
	uint64_t                        abstime;
	uint64_t                        slop;
	uint64_t                        now;
	spl_t                           s;

	if (__improbable(event == NO_EVENT)) {
		panic("%s() called with NO_EVENT", __func__);
	}

	now = mach_absolute_time();
	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
	deadline = now + abstime;

	clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);

	struct waitq *waitq;
	waitq = global_eventq(event);

	s = splsched();
	waitq_lock(waitq);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    urgency, deadline, slop,
	    thread);

	waitq_unlock(waitq);
	splx(s);
	return wresult;
}

wait_result_t
assert_wait_deadline(
	event_t                         event,
	wait_interrupt_t        interruptible,
	uint64_t                        deadline)
{
	thread_t                        thread = current_thread();
	wait_result_t           wresult;
	spl_t                           s;

	if (__improbable(event == NO_EVENT)) {
		panic("%s() called with NO_EVENT", __func__);
	}

	struct waitq *waitq;
	waitq = global_eventq(event);

	s = splsched();
	waitq_lock(waitq);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL, deadline,
	    TIMEOUT_NO_LEEWAY, thread);
	waitq_unlock(waitq);
	splx(s);
	return wresult;
}

wait_result_t
assert_wait_deadline_with_leeway(
	event_t                         event,
	wait_interrupt_t        interruptible,
	wait_timeout_urgency_t  urgency,
	uint64_t                        deadline,
	uint64_t                        leeway)
{
	thread_t                        thread = current_thread();
	wait_result_t           wresult;
	spl_t                           s;

	if (__improbable(event == NO_EVENT)) {
		panic("%s() called with NO_EVENT", __func__);
	}

	struct waitq *waitq;
	waitq = global_eventq(event);

	s = splsched();
	waitq_lock(waitq);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);

	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    urgency, deadline, leeway,
	    thread);
	waitq_unlock(waitq);
	splx(s);
	return wresult;
}

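/*
 * sched_cond_*: a minimal event protocol built on one atomic word with two
 * bits: SCHED_COND_ACTIVE (the waiter is awake and processing) and
 * SCHED_COND_WAKEUP (a wakeup is pending).  sched_cond_wait() clears ACTIVE
 * before blocking; sched_cond_signal() sets WAKEUP and only pays for a
 * thread_wakeup_thread() when the waiter was inactive with no wakeup already
 * pending; the awakened thread calls sched_cond_ack() (setting ACTIVE and
 * clearing WAKEUP) before processing.  Sketch of the assumed usage by a
 * dedicated worker thread:
 *
 *	waiter:	sched_cond_wait(&cond, THREAD_UNINT, continuation);
 *		// in the continuation, after wakeup:
 *		sched_cond_ack(&cond); ... process work ...
 *	waker:	sched_cond_signal(&cond, waiter_thread);
 */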
1511 void
sched_cond_init(sched_cond_atomic_t * cond)1512 sched_cond_init(
1513 	sched_cond_atomic_t *cond)
1514 {
1515 	os_atomic_init(cond, SCHED_COND_INIT);
1516 }
1517 
1518 wait_result_t
sched_cond_wait_parameter(sched_cond_atomic_t * cond,wait_interrupt_t interruptible,thread_continue_t continuation,void * parameter)1519 sched_cond_wait_parameter(
1520 	sched_cond_atomic_t *cond,
1521 	wait_interrupt_t interruptible,
1522 	thread_continue_t continuation,
1523 	void *parameter)
1524 {
1525 	assert_wait((event_t) cond, interruptible);
1526 	/* clear active bit to indicate future wakeups will have to unblock this thread */
1527 	sched_cond_t new_state = (sched_cond_t) os_atomic_andnot(cond, SCHED_COND_ACTIVE, relaxed);
1528 	if (__improbable(new_state & SCHED_COND_WAKEUP)) {
1529 		/* a wakeup has been issued; undo wait assertion, ack the wakeup, and return */
1530 		thread_t thread = current_thread();
1531 		clear_wait(thread, THREAD_AWAKENED);
1532 		sched_cond_ack(cond);
1533 		return THREAD_AWAKENED;
1534 	}
1535 	return thread_block_parameter(continuation, parameter);
1536 }
1537 
1538 wait_result_t
sched_cond_wait(sched_cond_atomic_t * cond,wait_interrupt_t interruptible,thread_continue_t continuation)1539 sched_cond_wait(
1540 	sched_cond_atomic_t *cond,
1541 	wait_interrupt_t interruptible,
1542 	thread_continue_t continuation)
1543 {
1544 	return sched_cond_wait_parameter(cond, interruptible, continuation, NULL);
1545 }
1546 
1547 sched_cond_t
sched_cond_ack(sched_cond_atomic_t * cond)1548 sched_cond_ack(
1549 	sched_cond_atomic_t *cond)
1550 {
1551 	sched_cond_t new_cond = (sched_cond_t) os_atomic_xor(cond, SCHED_COND_ACTIVE | SCHED_COND_WAKEUP, acquire);
1552 	assert(new_cond & SCHED_COND_ACTIVE);
1553 	return new_cond;
1554 }
1555 
1556 kern_return_t
sched_cond_signal(sched_cond_atomic_t * cond,thread_t thread)1557 sched_cond_signal(
1558 	sched_cond_atomic_t  *cond,
1559 	thread_t thread)
1560 {
1561 	disable_preemption();
1562 	sched_cond_t old_cond = (sched_cond_t) os_atomic_or_orig(cond, SCHED_COND_WAKEUP, release);
1563 	if (!(old_cond & (SCHED_COND_WAKEUP | SCHED_COND_ACTIVE))) {
1564 		/* this was the first wakeup to be issued AND the thread was inactive */
1565 		thread_wakeup_thread((event_t) cond, thread);
1566 	}
1567 	enable_preemption();
1568 	return KERN_SUCCESS;
1569 }
1570 
1571 /*
1572  * thread_isoncpu:
1573  *
1574  * Return TRUE if a thread is running on a processor such that an AST
1575  * is needed to pull it out of userspace execution, or if executing in
1576  * the kernel, bring to a context switch boundary that would cause
1577  * thread state to be serialized in the thread PCB.
1578  *
1579  * Thread locked, returns the same way. While locked, fields
1580  * like "state" cannot change. "runq" can change only from set to unset.
1581  */
1582 static inline boolean_t
thread_isoncpu(thread_t thread)1583 thread_isoncpu(thread_t thread)
1584 {
1585 	/* Not running or runnable */
1586 	if (!(thread->state & TH_RUN)) {
1587 		return FALSE;
1588 	}
1589 
1590 	/* Waiting on a runqueue, not currently running */
1591 	/* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1592 	/* TODO: This can also be incorrect for `handoff` cases where
1593 	 * the thread is never enqueued on the runq */
1594 	if (thread_get_runq(thread) != PROCESSOR_NULL) {
1595 		return FALSE;
1596 	}
1597 
1598 	/*
1599 	 * Thread does not have a stack yet
1600 	 * It could be on the stack alloc queue or preparing to be invoked
1601 	 */
1602 	if (!thread->kernel_stack) {
1603 		return FALSE;
1604 	}
1605 
1606 	/*
1607 	 * Thread must be running on a processor, or
1608 	 * about to run, or just did run. In all these
1609 	 * cases, an AST to the processor is needed
1610 	 * to guarantee that the thread is kicked out
1611 	 * of userspace and the processor has
1612 	 * context switched (and saved register state).
1613 	 */
1614 	return TRUE;
1615 }
1616 
1617 /*
1618  * thread_stop:
1619  *
1620  * Force a preemption point for a thread and wait
1621  * for it to stop running on a CPU. If a stronger
1622  * guarantee is requested, wait until no longer
1623  * runnable. Arbitrates access among
1624  * multiple stop requests. (released by unstop)
1625  *
1626  * The thread must enter a wait state and stop via a
1627  * separate means.
1628  *
1629  * Returns FALSE if interrupted.
1630  */
1631 boolean_t
thread_stop(thread_t thread,boolean_t until_not_runnable)1632 thread_stop(
1633 	thread_t                thread,
1634 	boolean_t       until_not_runnable)
1635 {
1636 	wait_result_t   wresult;
1637 	spl_t                   s = splsched();
1638 	boolean_t               oncpu;
1639 
1640 	wake_lock(thread);
1641 	thread_lock(thread);
1642 
1643 	while (thread->state & TH_SUSP) {
1644 		thread->wake_active = TRUE;
1645 		thread_unlock(thread);
1646 
1647 		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1648 		wake_unlock(thread);
1649 		splx(s);
1650 
1651 		if (wresult == THREAD_WAITING) {
1652 			wresult = thread_block(THREAD_CONTINUE_NULL);
1653 		}
1654 
1655 		if (wresult != THREAD_AWAKENED) {
1656 			return FALSE;
1657 		}
1658 
1659 		s = splsched();
1660 		wake_lock(thread);
1661 		thread_lock(thread);
1662 	}
1663 
1664 	thread->state |= TH_SUSP;
1665 
1666 	while ((oncpu = thread_isoncpu(thread)) ||
1667 	    (until_not_runnable && (thread->state & TH_RUN))) {
1668 		if (oncpu) {
1669 			/*
1670 			 * TODO: chosen_processor isn't really the right
1671 			 * thing to IPI here.  We really want `last_processor`,
1672 			 * but we also want to know where to send the IPI
1673 			 * *before* thread_invoke sets last_processor.
1674 			 *
1675 			 * rdar://47149497 (thread_stop doesn't IPI the right core)
1676 			 */
1677 			assert(thread->state & TH_RUN);
1678 			processor_t processor = thread->chosen_processor;
1679 			assert(processor != PROCESSOR_NULL);
1680 			cause_ast_check(processor);
1681 		}
1682 
1683 		thread->wake_active = TRUE;
1684 		thread_unlock(thread);
1685 
1686 		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1687 		wake_unlock(thread);
1688 		splx(s);
1689 
1690 		if (wresult == THREAD_WAITING) {
1691 			wresult = thread_block(THREAD_CONTINUE_NULL);
1692 		}
1693 
1694 		if (wresult != THREAD_AWAKENED) {
1695 			thread_unstop(thread);
1696 			return FALSE;
1697 		}
1698 
1699 		s = splsched();
1700 		wake_lock(thread);
1701 		thread_lock(thread);
1702 	}
1703 
1704 	thread_unlock(thread);
1705 	wake_unlock(thread);
1706 	splx(s);
1707 
1708 	/*
1709 	 * We return with the thread unlocked. To prevent it from
1710 	 * transitioning to a runnable state (or from TH_RUN to
1711 	 * being on the CPU), the caller must ensure the thread
1712 	 * is stopped via an external means (such as an AST).
1713 	 */
1714 
1715 	return TRUE;
1716 }
1717 
1718 /*
1719  * thread_unstop:
1720  *
1721  * Release a previous stop request and set
1722  * the thread running if appropriate.
1723  *
1724  * Use only after a successful stop operation.
1725  */
1726 void
1727 thread_unstop(
1728 	thread_t        thread)
1729 {
1730 	spl_t           s = splsched();
1731 
1732 	wake_lock(thread);
1733 	thread_lock(thread);
1734 
1735 	assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1736 
1737 	if (thread->state & TH_SUSP) {
1738 		thread->state &= ~TH_SUSP;
1739 
1740 		if (thread->wake_active) {
1741 			thread->wake_active = FALSE;
1742 			thread_unlock(thread);
1743 
1744 			thread_wakeup(&thread->wake_active);
1745 			wake_unlock(thread);
1746 			splx(s);
1747 
1748 			return;
1749 		}
1750 	}
1751 
1752 	thread_unlock(thread);
1753 	wake_unlock(thread);
1754 	splx(s);
1755 }
1756 
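/*
 * Hedged usage sketch for the stop/unstop pair above (`target` and the
 * inspection step are illustrative):
 *
 *	if (thread_stop(target, TRUE)) {
 *		// target is off-CPU and no longer runnable; its register
 *		// state is serialized in the PCB and safe to inspect
 *		thread_unstop(target);	// release the TH_SUSP arbitration
 *	} else {
 *		// wait was interrupted; thread_stop already dropped the
 *		// stop request, so do not call thread_unstop
 *	}
 */
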
1757 /*
1758  * thread_wait:
1759  *
1760  * Wait for a thread to stop running. (non-interruptible)
1761  *
1762  */
1763 void
1764 thread_wait(
1765 	thread_t        thread,
1766 	boolean_t       until_not_runnable)
1767 {
1768 	wait_result_t   wresult;
1769 	boolean_t       oncpu;
1770 	processor_t     processor;
1771 	spl_t           s = splsched();
1772 
1773 	wake_lock(thread);
1774 	thread_lock(thread);
1775 
1776 	/*
1777 	 * Wait until not running on a CPU.  If stronger requirement
1778 	 * desired, wait until not runnable.  Assumption: if thread is
1779 	 * on CPU, then TH_RUN is set, so we're not waiting in any case
1780 	 * where the original, pure "TH_RUN" check would have let us
1781 	 * finish.
1782 	 */
1783 	while ((oncpu = thread_isoncpu(thread)) ||
1784 	    (until_not_runnable && (thread->state & TH_RUN))) {
1785 		if (oncpu) {
1786 			assert(thread->state & TH_RUN);
1787 			processor = thread->chosen_processor;
1788 			cause_ast_check(processor);
1789 		}
1790 
1791 		thread->wake_active = TRUE;
1792 		thread_unlock(thread);
1793 
1794 		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1795 		wake_unlock(thread);
1796 		splx(s);
1797 
1798 		if (wresult == THREAD_WAITING) {
1799 			thread_block(THREAD_CONTINUE_NULL);
1800 		}
1801 
1802 		s = splsched();
1803 		wake_lock(thread);
1804 		thread_lock(thread);
1805 	}
1806 
1807 	thread_unlock(thread);
1808 	wake_unlock(thread);
1809 	splx(s);
1810 }
1811 
1812 /*
1813  *	Routine: clear_wait_internal
1814  *
1815  *		Clear the wait condition for the specified thread.
1816  *		Start the thread executing if that is appropriate.
1817  *	Arguments:
1818  *		thread		thread to awaken
1819  *		result		Wakeup result the thread should see
1820  *	Conditions:
1821  *		At splsched
1822  *		the thread is locked.
1823  *	Returns:
1824  *		KERN_SUCCESS		thread was rousted out a wait
1825  *		KERN_FAILURE		thread was waiting but could not be rousted
1826  *		KERN_NOT_WAITING	thread was not waiting
1827  */
1828 __private_extern__ kern_return_t
1829 clear_wait_internal(
1830 	thread_t        thread,
1831 	wait_result_t   wresult)
1832 {
1833 	waitq_t waitq = thread->waitq;
1834 
1835 	if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1836 		return KERN_FAILURE;
1837 	}
1838 
1839 	/*
1840 	 * Check that the thread is waiting and not waking, as a waking thread
1841 	 * has already cleared its waitq and is destined to be started via
1842 	 * thread_go(), so we don't need to do it again.
1843 	 */
1844 	if ((thread->state & (TH_WAIT | TH_TERMINATE | TH_WAKING)) != TH_WAIT) {
1845 		assert(waitq_is_null(thread->waitq));
1846 		return KERN_NOT_WAITING;
1847 	}
1848 
1849 	/* may drop and retake the thread lock */
1850 	if (!waitq_is_null(waitq) && !waitq_pull_thread_locked(waitq, thread)) {
1851 		return KERN_NOT_WAITING;
1852 	}
1853 
1854 	thread_go(thread, wresult, /* handoff */ false);
1855 
1856 	return KERN_SUCCESS;
1857 }
1858 
1859 
1860 /*
1861  *	clear_wait:
1862  *
1863  *	Clear the wait condition for the specified thread.  Start the thread
1864  *	executing if that is appropriate.
1865  *
1866  *	parameters:
1867  *	  thread		thread to awaken
1868  *	  result		Wakeup result the thread should see
1869  */
1870 kern_return_t
1871 clear_wait(
1872 	thread_t                thread,
1873 	wait_result_t   result)
1874 {
1875 	kern_return_t ret;
1876 	spl_t           s;
1877 
1878 	s = splsched();
1879 	thread_lock(thread);
1880 
1881 	ret = clear_wait_internal(thread, result);
1882 
1883 	if (thread == current_thread()) {
1884 		/*
1885 		 * The thread must be ready to wait again immediately
1886 		 * after clearing its own wait.
1887 		 */
1888 		assert((thread->state & TH_WAKING) == 0);
1889 	}
1890 
1891 	thread_unlock(thread);
1892 	splx(s);
1893 	return ret;
1894 }
1895 
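/*
 * Hedged example of the clear_wait() contract (`waiter` is illustrative):
 *
 *	if (clear_wait(waiter, THREAD_INTERRUPTED) == KERN_NOT_WAITING) {
 *		// waiter was already woken (or never blocked); the wakeup
 *		// path owns delivering its wait result
 *	}
 *
 * Per clear_wait_internal() above, THREAD_INTERRUPTED is refused with
 * KERN_FAILURE when the waiter is in an uninterruptible (TH_UNINT) wait.
 */
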
1896 /*
1897  *	thread_wakeup_nthreads_prim:
1898  *
1899  *	Common routine for thread_wakeup, thread_wakeup_with_result,
1900  *	and thread_wakeup_one; wakes up to nthreads waiters on the event.
1901  *
1902  */
1903 kern_return_t
1904 thread_wakeup_nthreads_prim(
1905 	event_t          event,
1906 	uint32_t         nthreads,
1907 	wait_result_t    result)
1908 {
1909 	if (__improbable(event == NO_EVENT)) {
1910 		panic("%s() called with NO_EVENT", __func__);
1911 	}
1912 
1913 	struct waitq *wq = global_eventq(event);
1914 
1915 	return waitq_wakeup64_nthreads(wq, CAST_EVENT64_T(event), result, WAITQ_WAKEUP_DEFAULT, nthreads);
1916 }
1917 
1918 /*
1919  *	thread_wakeup_prim:
1920  *
1921  *	Common routine for thread_wakeup, thread_wakeup_with_result,
1922  *	and thread_wakeup_one.
1923  *
1924  */
1925 kern_return_t
1926 thread_wakeup_prim(
1927 	event_t          event,
1928 	boolean_t        one_thread,
1929 	wait_result_t    result)
1930 {
1931 	if (one_thread) {
1932 		return thread_wakeup_nthreads_prim(event, 1, result);
1933 	} else {
1934 		return thread_wakeup_nthreads_prim(event, UINT32_MAX, result);
1935 	}
1936 }
1937 
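/*
 * For reference, the familiar wakeup calls are thin wrappers over this
 * primitive; to our understanding they expand roughly as follows (see
 * sched_prim.h for the authoritative macros):
 *
 *	thread_wakeup(x)                // thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
 *	thread_wakeup_one(x)            // thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)
 *	thread_wakeup_with_result(x, z) // thread_wakeup_prim((x), FALSE, (z))
 */
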
1938 /*
1939  * Wakeup a specified thread if and only if it's waiting for this event
1940  */
1941 kern_return_t
1942 thread_wakeup_thread(
1943 	event_t         event,
1944 	thread_t        thread)
1945 {
1946 	if (__improbable(event == NO_EVENT)) {
1947 		panic("%s() called with NO_EVENT", __func__);
1948 	}
1949 
1950 	if (__improbable(thread == THREAD_NULL)) {
1951 		panic("%s() called with THREAD_NULL", __func__);
1952 	}
1953 
1954 	struct waitq *wq = global_eventq(event);
1955 
1956 	return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1957 }
1958 
1959 /*
1960  * Wakeup a thread waiting on an event and promote it to a priority.
1961  *
1962  * Requires woken thread to un-promote itself when done.
1963  */
1964 kern_return_t
1965 thread_wakeup_one_with_pri(
1966 	event_t      event,
1967 	int          priority)
1968 {
1969 	if (__improbable(event == NO_EVENT)) {
1970 		panic("%s() called with NO_EVENT", __func__);
1971 	}
1972 
1973 	struct waitq *wq = global_eventq(event);
1974 
1975 	return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1976 }
1977 
1978 /*
1979  * Wakeup a thread waiting on an event,
1980  * promote it to a priority,
1981  * and return a reference to the woken thread.
1982  *
1983  * Requires woken thread to un-promote itself when done.
1984  */
1985 thread_t
1986 thread_wakeup_identify(event_t  event,
1987     int      priority)
1988 {
1989 	if (__improbable(event == NO_EVENT)) {
1990 		panic("%s() called with NO_EVENT", __func__);
1991 	}
1992 
1993 	struct waitq *wq = global_eventq(event);
1994 
1995 	return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1996 }
1997 
1998 /*
1999  *	thread_bind:
2000  *
2001  *	Force the current thread to execute on the specified processor.
2002  *	Takes effect after the next thread_block().
2003  *
2004  *	Returns the previous binding.  PROCESSOR_NULL means
2005  *	not bound.
2006  *
2007  *	XXX - DO NOT export this to users - XXX
2008  */
2009 processor_t
2010 thread_bind(
2011 	processor_t             processor)
2012 {
2013 	thread_t                self = current_thread();
2014 	processor_t             prev;
2015 	spl_t                   s;
2016 
2017 	s = splsched();
2018 	thread_lock(self);
2019 
2020 	prev = thread_bind_internal(self, processor);
2021 
2022 	thread_unlock(self);
2023 	splx(s);
2024 
2025 	return prev;
2026 }
2027 
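/*
 * Hedged usage sketch: binding only takes effect at the next
 * thread_block(), so callers typically pair the two (`target` is
 * illustrative):
 *
 *	processor_t prev = thread_bind(target);
 *	thread_block(THREAD_CONTINUE_NULL);	// migrate to the bound CPU
 *	// ... per-processor work ...
 *	thread_bind(prev);			// restore the old binding
 *	thread_block(THREAD_CONTINUE_NULL);
 *
 * thread_vm_bind_group_add() below uses exactly this bind-then-block shape.
 */
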
2028 void
2029 thread_bind_during_wakeup(thread_t thread, processor_t processor)
2030 {
2031 	assert(!ml_get_interrupts_enabled());
2032 	assert((thread->state & (TH_WAIT | TH_WAKING)) == (TH_WAIT | TH_WAKING));
2033 #if MACH_ASSERT
2034 	thread_lock_assert(thread, LCK_ASSERT_OWNED);
2035 #endif
2036 
2037 	if (thread->bound_processor != processor) {
2038 		thread_bind_internal(thread, processor);
2039 	}
2040 }
2041 
2042 void
2043 thread_unbind_after_queue_shutdown(
2044 	thread_t                thread,
2045 	processor_t             processor __assert_only)
2046 {
2047 	assert(!ml_get_interrupts_enabled());
2048 
2049 	thread_lock(thread);
2050 
2051 	if (thread->bound_processor) {
2052 		bool removed;
2053 
2054 		assert(thread->bound_processor == processor);
2055 
2056 		removed = thread_run_queue_remove(thread);
2057 		/*
2058 		 * we can always unbind even if we didn't really remove the
2059 		 * thread from the runqueue
2060 		 */
2061 		thread_bind_internal(thread, PROCESSOR_NULL);
2062 		if (removed) {
2063 			thread_run_queue_reinsert(thread, SCHED_TAILQ);
2064 		}
2065 	}
2066 
2067 	thread_unlock(thread);
2068 }
2069 
2070 /*
2071  * thread_bind_internal:
2072  *
2073  * If the specified thread is not the current thread, and it is currently
2074  * running on another CPU, a remote AST must be sent to that CPU to cause
2075  * the thread to migrate to its bound processor. Otherwise, the migration
2076  * will occur at the next quantum expiration or blocking point.
2077  *
2078  * When the thread is the current thread, an explicit thread_block() should
2079  * be used to force the current processor to context switch away and
2080  * let the thread migrate to the bound processor.
2081  *
2082  * Thread must be locked, and at splsched.
2083  */
2084 
2085 static processor_t
2086 thread_bind_internal(
2087 	thread_t                thread,
2088 	processor_t             processor)
2089 {
2090 	processor_t             prev;
2091 
2092 	/* <rdar://problem/15102234> */
2093 	assert(thread->sched_pri < BASEPRI_RTQUEUES);
2094 	/* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
2095 	thread_assert_runq_null(thread);
2096 
2097 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND),
2098 	    thread_tid(thread), processor ? processor->cpu_id : ~0ul, 0, 0, 0);
2099 
2100 	prev = thread->bound_processor;
2101 	thread->bound_processor = processor;
2102 
2103 	return prev;
2104 }
2105 
2106 /*
2107  * thread_vm_bind_group_add:
2108  *
2109  * The "VM bind group" is a special mechanism to mark a collection
2110  * of threads from the VM subsystem that, in general, should be scheduled
2111  * with only one CPU of parallelism. To accomplish this, we initially
2112  * bind all the threads to the master processor, which has the effect
2113  * that only one of the threads in the group can execute at once, including
2114  * preempting threads in the group that are of lower priority. Future
2115  * implementations may use more dynamic mechanisms to prevent the collection
2116  * of VM threads from using more CPU time than desired.
2117  *
2118  * The current implementation can result in priority inversions where
2119  * compute-bound priority 95 or realtime threads that happen to have
2120  * landed on the master processor prevent the VM threads from running.
2121  * When this situation is detected, we unbind the threads for one
2122  * scheduler tick to allow the scheduler to run the threads on
2123  * additional CPUs, before restoring the binding (assuming high latency
2124  * is no longer a problem).
2125  */
2126 
2127 /*
2128  * The current max is provisioned for:
2129  * vm_compressor_swap_trigger_thread (92)
2130  * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
2131  * vm_pageout_continue (92)
2132  * memorystatus_thread (95)
2133  */
2134 #define MAX_VM_BIND_GROUP_COUNT (5)
2135 decl_simple_lock_data(static, sched_vm_group_list_lock);
2136 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
2137 static int sched_vm_group_thread_count;
2138 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
2139 
2140 void
2141 thread_vm_bind_group_add(void)
2142 {
2143 	thread_t self = current_thread();
2144 
2145 	if (support_bootcpu_shutdown) {
2146 		/*
2147 		 * Bind group is not supported without an always-on
2148 		 * processor to bind to. If we need these to coexist,
2149 		 * we'd need to dynamically move the group to
2150 		 * another processor as it shuts down, or build
2151 		 * a different way to run a set of threads
2152 		 * without parallelism.
2153 		 */
2154 		return;
2155 	}
2156 
2157 	thread_reference(self);
2158 	self->options |= TH_OPT_SCHED_VM_GROUP;
2159 
2160 	simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
2161 	assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
2162 	sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
2163 	simple_unlock(&sched_vm_group_list_lock);
2164 
2165 	thread_bind(master_processor);
2166 
2167 	/* Switch to bound processor if not already there */
2168 	thread_block(THREAD_CONTINUE_NULL);
2169 }
2170 
2171 static void
2172 sched_vm_group_maintenance(void)
2173 {
2174 	uint64_t ctime = mach_absolute_time();
2175 	uint64_t longtime = ctime - sched_tick_interval;
2176 	int i;
2177 	spl_t s;
2178 	boolean_t high_latency_observed = FALSE;
2179 	boolean_t runnable_and_not_on_runq_observed = FALSE;
2180 	boolean_t bind_target_changed = FALSE;
2181 	processor_t bind_target = PROCESSOR_NULL;
2182 
2183 	/* Make sure nobody attempts to add new threads while we are enumerating them */
2184 	simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
2185 
2186 	s = splsched();
2187 
2188 	for (i = 0; i < sched_vm_group_thread_count; i++) {
2189 		thread_t thread = sched_vm_group_thread_list[i];
2190 		assert(thread != THREAD_NULL);
2191 		thread_lock(thread);
2192 		if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
2193 			if (thread_get_runq(thread) != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
2194 				high_latency_observed = TRUE;
2195 			} else if (thread_get_runq(thread) == PROCESSOR_NULL) {
2196 				/* There are some cases where a transitioning thread also falls into this case */
2197 				runnable_and_not_on_runq_observed = TRUE;
2198 			}
2199 		}
2200 		thread_unlock(thread);
2201 
2202 		if (high_latency_observed && runnable_and_not_on_runq_observed) {
2203 			/* All the things we are looking for are true, stop looking */
2204 			break;
2205 		}
2206 	}
2207 
2208 	splx(s);
2209 
2210 	if (sched_vm_group_temporarily_unbound) {
2211 		/* If we turned off binding, make sure everything is OK before rebinding */
2212 		if (!high_latency_observed) {
2213 			/* rebind */
2214 			bind_target_changed = TRUE;
2215 			bind_target = master_processor;
2216 			sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
2217 		}
2218 	} else {
2219 		/*
2220 		 * Check if we're in a bad state, which is defined by high
2221 		 * latency with no core currently executing a thread. If a
2222 		 * single thread is making progress on a CPU, that means the
2223 		 * binding concept to reduce parallelism is working as
2224 		 * designed.
2225 		 */
2226 		if (high_latency_observed && !runnable_and_not_on_runq_observed) {
2227 			/* unbind */
2228 			bind_target_changed = TRUE;
2229 			bind_target = PROCESSOR_NULL;
2230 			sched_vm_group_temporarily_unbound = TRUE;
2231 		}
2232 	}
2233 
2234 	if (bind_target_changed) {
2235 		s = splsched();
2236 		for (i = 0; i < sched_vm_group_thread_count; i++) {
2237 			thread_t thread = sched_vm_group_thread_list[i];
2238 			boolean_t removed;
2239 			assert(thread != THREAD_NULL);
2240 
2241 			thread_lock(thread);
2242 			removed = thread_run_queue_remove(thread);
2243 			if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
2244 				thread_bind_internal(thread, bind_target);
2245 			} else {
2246 				/*
2247 				 * Thread was in the middle of being context-switched-to,
2248 				 * or was in the process of blocking. To avoid switching the bind
2249 				 * state out mid-flight, defer the change if possible.
2250 				 */
2251 				if (bind_target == PROCESSOR_NULL) {
2252 					thread_bind_internal(thread, bind_target);
2253 				} else {
2254 					sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
2255 				}
2256 			}
2257 
2258 			if (removed) {
2259 				thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
2260 			}
2261 			thread_unlock(thread);
2262 		}
2263 		splx(s);
2264 	}
2265 
2266 	simple_unlock(&sched_vm_group_list_lock);
2267 }
2268 
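/*
 * Example of the hysteresis implemented above: a pass that observes a
 * group thread runnable since before the last sched_tick_interval, with
 * no group thread mid-dispatch, unbinds the group (bind_target =
 * PROCESSOR_NULL). A later pass that sees no high latency rebinds to
 * master_processor, unless a thread is mid-flight, in which case
 * sched_vm_group_temporarily_unbound stays TRUE and the next pass retries.
 */
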
2269 #if defined(__x86_64__)
2270 #define SCHED_AVOID_CPU0 1
2271 #else
2272 #define SCHED_AVOID_CPU0 0
2273 #endif
2274 
2275 int sched_allow_rt_smt = 1;
2276 int sched_avoid_cpu0 = SCHED_AVOID_CPU0;
2277 int sched_allow_rt_steal = 1;
2278 int sched_backup_cpu_timeout_count = 5; /* The maximum number of 10us delays to wait before using a backup cpu */
2279 
2280 int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
2281 
2282 int
2283 sched_get_rt_n_backup_processors(void)
2284 {
2285 	return sched_rt_n_backup_processors;
2286 }
2287 
2288 void
2289 sched_set_rt_n_backup_processors(int n)
2290 {
2291 	if (n < 0) {
2292 		n = 0;
2293 	} else if (n > SCHED_MAX_BACKUP_PROCESSORS) {
2294 		n = SCHED_MAX_BACKUP_PROCESSORS;
2295 	}
2296 
2297 	sched_rt_n_backup_processors = n;
2298 }
2299 
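/*
 * Example: sched_set_rt_n_backup_processors(-3) stores 0, and any request
 * above SCHED_MAX_BACKUP_PROCESSORS is clamped to that maximum before
 * being published in sched_rt_n_backup_processors.
 */
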
2300 int sched_rt_runq_strict_priority = false;
2301 
2302 inline static processor_set_t
2303 change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
2304 {
2305 	if (current_pset != new_pset) {
2306 		pset_unlock(current_pset);
2307 		pset_lock(new_pset);
2308 	}
2309 
2310 	return new_pset;
2311 }
2312 
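/*
 * change_locked_pset() is a lock hand-off helper: the caller enters
 * holding current_pset's lock and leaves holding new_pset's. A hedged
 * sketch of the iteration pattern it enables (as used by
 * choose_next_rt_processor_for_IPI() below):
 *
 *	processor_set_t pset = starting_pset;	// locked by caller
 *	do {
 *		pset = change_locked_pset(pset, next_pset(pset));
 *		// examine pset under its lock
 *	} while (pset != starting_pset);
 *
 * Note the window: when the two psets differ, current_pset is unlocked
 * before new_pset is locked, so previously observed state may be stale.
 */
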
2313 /*
2314  * Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
2315  * rebalancing opportunity exists when a core is (instantaneously) idle, but
2316  * other SMT-capable cores may be over-committed. TODO: some possible negatives:
2317  * - IPI thrash if this core does not remain idle following the load balancing ASTs
2318  * - Idle "thrash", when IPI issue is followed by idle entry/core power down
2319  *   followed by a wakeup shortly thereafter.
2320  */
2321 
2322 #if (DEVELOPMENT || DEBUG)
2323 int sched_smt_balance = 1;
2324 #endif
2325 
2326 #if CONFIG_SCHED_SMT
2327 /* Invoked with pset locked, returns with pset unlocked */
2328 bool
2329 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
2330 {
2331 	processor_t ast_processor = NULL;
2332 
2333 #if (DEVELOPMENT || DEBUG)
2334 	if (__improbable(sched_smt_balance == 0)) {
2335 		goto smt_balance_exit;
2336 	}
2337 #endif
2338 
2339 	assert(cprocessor == current_processor());
2340 	if (cprocessor->is_SMT == FALSE) {
2341 		goto smt_balance_exit;
2342 	}
2343 
2344 	processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
2345 
2346 	/* Determine if both this processor and its sibling are idle,
2347 	 * indicating an SMT rebalancing opportunity.
2348 	 */
2349 	if (sib_processor->state != PROCESSOR_IDLE) {
2350 		goto smt_balance_exit;
2351 	}
2352 
2353 	processor_t sprocessor;
2354 
2355 	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2356 	uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
2357 	    ~cpset->primary_map);
2358 	for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
2359 		sprocessor = processor_array[cpuid];
2360 		if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
2361 		    (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
2362 			ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2363 			if (ipi_type != SCHED_IPI_NONE) {
2364 				assert(sprocessor != cprocessor);
2365 				ast_processor = sprocessor;
2366 				break;
2367 			}
2368 		}
2369 	}
2370 
2371 smt_balance_exit:
2372 	pset_unlock(cpset);
2373 
2374 	if (ast_processor) {
2375 		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
2376 		sched_ipi_perform(ast_processor, ipi_type);
2377 	}
2378 	return false;
2379 }
2380 #else /* CONFIG_SCHED_SMT */
2381 /* Invoked with pset locked, returns with pset unlocked */
2382 bool
2383 sched_SMT_balance(__unused processor_t cprocessor, __unused processor_set_t cpset)
2384 {
2385 	pset_unlock(cpset);
2386 	return false;
2387 }
2388 #endif /* CONFIG_SCHED_SMT */
2389 
2390 
2391 static cpumap_t
2392 pset_available_cpumap(processor_set_t pset)
2393 {
2394 	return pset->cpu_available_map & pset->recommended_bitmask;
2395 }
2396 
2397 int
2398 pset_available_cpu_count(processor_set_t pset)
2399 {
2400 	return bit_count(pset_available_cpumap(pset));
2401 }
2402 
2403 bool
2404 pset_is_recommended(processor_set_t pset)
2405 {
2406 	if (!pset) {
2407 		return false;
2408 	}
2409 	return pset_available_cpu_count(pset) > 0;
2410 }
2411 
2412 bool
2413 pset_type_is_recommended(processor_set_t pset)
2414 {
2415 	if (!pset) {
2416 		return false;
2417 	}
2418 	pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
2419 	return bit_count(recommended_psets) > 0;
2420 }
2421 
2422 static cpumap_t
2423 pset_available_but_not_running_cpumap(processor_set_t pset)
2424 {
2425 	return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
2426 	       pset->recommended_bitmask;
2427 }
2428 
2429 bool
2430 pset_has_stealable_threads(processor_set_t pset)
2431 {
2432 	pset_assert_locked(pset);
2433 
2434 	cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
2435 #if CONFIG_SCHED_SMT
2436 	/*
2437 	 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
2438 	 * available primary CPUs
2439 	 */
2440 	avail_map &= pset->primary_map;
2441 #endif /* CONFIG_SCHED_SMT */
2442 
2443 	return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
2444 }
2445 
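/*
 * Worked example for pset_has_stealable_threads(): with pset_runq.count
 * == 3, one queued RT thread, and two available-but-not-running primary
 * CPUs, the test is (3 > 0) && ((3 + 1) > 2), so the pset advertises
 * stealable threads.
 */
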
2446 static cpumap_t
2447 pset_available_but_not_running_rt_threads_cpumap(processor_set_t pset)
2448 {
2449 	cpumap_t avail_map = pset_available_cpumap(pset);
2450 #if CONFIG_SCHED_SMT
2451 	if (!sched_allow_rt_smt) {
2452 		/*
2453 		 * Secondary CPUs are not allowed to run RT threads, so
2454 		 * only primary CPUs should be included
2455 		 */
2456 		avail_map &= pset->primary_map;
2457 	}
2458 #endif /* CONFIG_SCHED_SMT */
2459 
2460 	return avail_map & ~pset->realtime_map;
2461 }
2462 
2463 static bool
2464 pset_needs_a_followup_IPI(processor_set_t pset)
2465 {
2466 	int nbackup_cpus = 0;
2467 
2468 	if (rt_runq_is_low_latency(pset)) {
2469 		nbackup_cpus = sched_rt_n_backup_processors;
2470 	}
2471 
2472 	int rt_rq_count = rt_runq_count(pset);
2473 
2474 	return (rt_rq_count > 0) && ((rt_rq_count + nbackup_cpus - bit_count(pset->pending_AST_URGENT_cpu_mask)) > 0);
2475 }
2476 
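/*
 * Worked example for pset_needs_a_followup_IPI(): with 2 queued RT
 * threads, a low-latency runq contributing sched_rt_n_backup_processors
 * == 2 backups, and 3 CPUs already holding pending urgent ASTs, the test
 * is (2 > 0) && ((2 + 2 - 3) > 0), so one follow-up IPI is still owed.
 */
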
2477 bool
2478 pset_has_stealable_rt_threads(processor_set_t pset)
2479 {
2480 	pset_node_t node = pset->node;
2481 	if (bit_count(node->pset_map) == 1) {
2482 		return false;
2483 	}
2484 
2485 	cpumap_t avail_map = pset_available_but_not_running_rt_threads_cpumap(pset);
2486 
2487 	return rt_runq_count(pset) > bit_count(avail_map);
2488 }
2489 
2490 static void
2491 pset_update_rt_stealable_state(processor_set_t pset)
2492 {
2493 	if (pset_has_stealable_rt_threads(pset)) {
2494 		pset->stealable_rt_threads_earliest_deadline = rt_runq_earliest_deadline(pset);
2495 	} else {
2496 		pset->stealable_rt_threads_earliest_deadline = RT_DEADLINE_NONE;
2497 	}
2498 }
2499 
2500 static void
2501 clear_pending_AST_bits(processor_set_t pset, processor_t processor, __kdebug_only const int trace_point_number)
2502 {
2503 	/* Acknowledge any pending IPIs here with pset lock held */
2504 	pset_assert_locked(pset);
2505 	if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2506 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END,
2507 		    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, trace_point_number);
2508 	}
2509 	bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2510 
2511 #if defined(CONFIG_SCHED_DEFERRED_AST)
2512 	bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
2513 #endif
2514 }
2515 
2516 /*
2517  * Called with pset locked, on a processor that is committing to run a new thread
2518  * Will transition an idle or dispatching processor to running as it picks up
2519  * the first new thread from the idle thread.
2520  */
2521 static void
2522 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
2523 {
2524 	pset_assert_locked(pset);
2525 
2526 	if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2527 		assert(current_thread() == processor->idle_thread);
2528 
2529 		/*
2530 		 * Dispatching processor is now committed to running new_thread,
2531 		 * so change its state to PROCESSOR_RUNNING.
2532 		 */
2533 		pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
2534 	} else {
2535 		assert(processor->state == PROCESSOR_RUNNING);
2536 	}
2537 
2538 	processor_state_update_from_thread(processor, new_thread, true);
2539 
2540 	if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2541 		bit_set(pset->realtime_map, processor->cpu_id);
2542 	} else {
2543 		bit_clear(pset->realtime_map, processor->cpu_id);
2544 	}
2545 	pset_update_rt_stealable_state(pset);
2546 
2547 	pset_node_t node = pset->node;
2548 
2549 	if (bit_count(node->pset_map) == 1) {
2550 		/* Node has only a single pset, so skip node pset map updates */
2551 		return;
2552 	}
2553 
2554 	cpumap_t avail_map = pset_available_cpumap(pset);
2555 
2556 	if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2557 		if ((avail_map & pset->realtime_map) == avail_map) {
2558 			/* No more non-RT CPUs in this pset */
2559 			atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2560 		}
2561 #if CONFIG_SCHED_SMT
2562 		avail_map &= pset->primary_map;
2563 		if ((avail_map & pset->realtime_map) == avail_map) {
2564 			/* No more non-RT primary CPUs in this pset */
2565 			atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2566 		}
2567 #endif /* CONFIG_SCHED_SMT */
2568 	} else {
2569 		if ((avail_map & pset->realtime_map) != avail_map) {
2570 			if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
2571 				atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2572 			}
2573 		}
2574 #if CONFIG_SCHED_SMT
2575 		avail_map &= pset->primary_map;
2576 		if ((avail_map & pset->realtime_map) != avail_map) {
2577 			if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
2578 				atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2579 			}
2580 		}
2581 #endif /* CONFIG_SCHED_SMT */
2582 	}
2583 }
2584 
2585 #if CONFIG_SCHED_SMT
2586 static processor_t choose_processor_for_realtime_thread_smt(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills);
2587 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups);
2588 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups);
2589 #else /* CONFIG_SCHED_SMT */
2590 static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool skip_spills);
2591 #endif /* CONFIG_SCHED_SMT */
2592 static processor_t choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline,
2593     processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus);
2594 static processor_t choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries);
2595 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup);
2596 static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
2597 
2598 static bool
2599 other_psets_have_earlier_rt_threads_pending(processor_set_t stealing_pset, uint64_t earliest_deadline)
2600 {
2601 	pset_map_t pset_map = stealing_pset->node->pset_map;
2602 
2603 	bit_clear(pset_map, stealing_pset->pset_id);
2604 
2605 	for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
2606 		processor_set_t nset = pset_array[pset_id];
2607 
2608 		if (deadline_add(nset->stealable_rt_threads_earliest_deadline, rt_deadline_epsilon) < earliest_deadline) {
2609 			return true;
2610 		}
2611 	}
2612 
2613 	return false;
2614 }
2615 
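/*
 * Note the rt_deadline_epsilon fuzz above: a remote pset qualifies only
 * when deadline_add(its earliest stealable deadline, epsilon) is still
 * strictly earlier than ours, i.e. it must beat us by more than epsilon
 * before we abandon the current choice to go after its thread.
 */
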
2616 /*
2617  * starting_pset must be locked, but returns true if it is unlocked before return
2618  * starting_pset must be locked on entry; returns true if it was unlocked before return
2619 static bool
2620 choose_next_rt_processor_for_IPI(processor_set_t starting_pset, processor_t chosen_processor, bool spill_ipi,
2621     processor_t *result_processor, sched_ipi_type_t *result_ipi_type)
2622 {
2623 	bool starting_pset_is_unlocked = false;
2624 	uint64_t earliest_deadline = rt_runq_earliest_deadline(starting_pset);
2625 	int max_pri = rt_runq_priority(starting_pset);
2626 	__kdebug_only uint64_t spill_tid = thread_tid(rt_runq_first(&starting_pset->rt_runq));
2627 	processor_set_t pset = starting_pset;
2628 	processor_t next_rt_processor = PROCESSOR_NULL;
2629 	if (spill_ipi) {
2630 		processor_set_t nset = next_pset(pset);
2631 		assert(nset != starting_pset);
2632 		pset = change_locked_pset(pset, nset);
2633 		starting_pset_is_unlocked = true;
2634 	}
2635 	do {
2636 		const bool consider_secondaries = true;
2637 		next_rt_processor = choose_next_processor_for_realtime_thread(pset, max_pri, earliest_deadline, chosen_processor, consider_secondaries);
2638 		if (next_rt_processor == PROCESSOR_NULL) {
2639 			if (!spill_ipi) {
2640 				break;
2641 			}
2642 			processor_set_t nset = next_pset(pset);
2643 			if (nset == starting_pset) {
2644 				break;
2645 			}
2646 			pset = change_locked_pset(pset, nset);
2647 			starting_pset_is_unlocked = true;
2648 		}
2649 	} while (next_rt_processor == PROCESSOR_NULL);
2650 	if (next_rt_processor) {
2651 		if (pset != starting_pset) {
2652 			if (bit_set_if_clear(pset->rt_pending_spill_cpu_mask, next_rt_processor->cpu_id)) {
2653 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_START,
2654 				    next_rt_processor->cpu_id, pset->rt_pending_spill_cpu_mask, starting_pset->cpu_set_low, (uintptr_t)spill_tid);
2655 			}
2656 		}
2657 		*result_ipi_type = sched_ipi_action(next_rt_processor, NULL, SCHED_IPI_EVENT_RT_PREEMPT);
2658 		*result_processor = next_rt_processor;
2659 	}
2660 	if (pset != starting_pset) {
2661 		pset_unlock(pset);
2662 	}
2663 
2664 	return starting_pset_is_unlocked;
2665 }
2666 
2667 /*
2668  * backup processor - used by choose_processor to send a backup IPI to in case the preferred processor can't immediately respond
2669  * followup processor - used in thread_select when there are still threads on the run queue and available processors
2670  * spill processor - a processor in a different processor set that is signalled to steal a thread from this run queue
2671  */
2672 typedef enum {
2673 	none,
2674 	backup,
2675 	followup,
2676 	spill
2677 } next_processor_type_t;
2678 
2679 #undef LOOP_COUNT
2680 #ifdef LOOP_COUNT
2681 int max_loop_count[MAX_SCHED_CPUS] = { 0 };
2682 #endif
2683 
2684 /*
2685  *	thread_select:
2686  *
2687  *	Select a new thread for the current processor to execute.
2688  *
2689  *	May select the current thread, which must be locked.
2690  */
2691 static thread_t
2692 thread_select(thread_t          thread,
2693     processor_t       processor,
2694     ast_t            *reason)
2695 {
2696 	processor_set_t         pset = processor->processor_set;
2697 	thread_t                        new_thread = THREAD_NULL;
2698 
2699 	assert(processor == current_processor());
2700 	assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2701 
2702 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_START,
2703 	    0, pset->pending_AST_URGENT_cpu_mask, 0, 0);
2704 
2705 	__kdebug_only int idle_reason = 0;
2706 	__kdebug_only int delay_count = 0;
2707 
2708 #if CONFIG_SCHED_SMT
2709 	int timeout_count = sched_backup_cpu_timeout_count;
2710 	if ((sched_avoid_cpu0 == 1) && (processor->cpu_id == 0)) {
2711 		/* Prefer cpu0 as backup */
2712 		timeout_count--;
2713 	} else if ((sched_avoid_cpu0 == 2) && (processor->processor_primary != processor)) {
2714 		/* Prefer secondary cpu as backup */
2715 		timeout_count--;
2716 	}
2717 #endif /* CONFIG_SCHED_SMT */
2718 	bool pending_AST_URGENT = false;
2719 	bool pending_AST_PREEMPT = false;
2720 
2721 #ifdef LOOP_COUNT
2722 	int loop_count = -1;
2723 #endif
2724 
2725 	do {
2726 		/*
2727 		 *	Update the priority.
2728 		 */
2729 		if (SCHED(can_update_priority)(thread)) {
2730 			SCHED(update_priority)(thread);
2731 		}
2732 
2733 		pset_lock(pset);
2734 
2735 restart:
2736 #ifdef LOOP_COUNT
2737 		loop_count++;
2738 		if (loop_count > max_loop_count[processor->cpu_id]) {
2739 			max_loop_count[processor->cpu_id] = loop_count;
2740 			if (bit_count(loop_count) == 1) {
2741 				kprintf("[%d]%s>max_loop_count = %d\n", processor->cpu_id, __FUNCTION__, loop_count);
2742 			}
2743 		}
2744 #endif
2745 		pending_AST_URGENT = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
2746 		pending_AST_PREEMPT = bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2747 
2748 		processor_state_update_from_thread(processor, thread, true);
2749 
2750 		idle_reason = 0;
2751 
2752 		processor_t ast_processor = PROCESSOR_NULL;
2753 		processor_t next_rt_processor = PROCESSOR_NULL;
2754 		sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2755 		sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2756 
2757 		assert(processor->state != PROCESSOR_OFF_LINE);
2758 
2759 		/*
2760 		 * Bound threads are dispatched to a processor without going through
2761 		 * choose_processor(), so in those cases we must continue trying to dequeue work
2762 		 * as we are the only option.
2763 		 */
2764 		if (!SCHED(processor_bound_count)(processor)) {
2765 			if (!processor->is_recommended) {
2766 				/*
2767 				 * The performance controller has provided a hint to not dispatch more threads.
2768 				 */
2769 				idle_reason = 1;
2770 				goto send_followup_ipi_before_idle;
2771 			} else if (rt_runq_count(pset)) {
2772 				bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, false);
2773 				/* Give the current RT thread a chance to complete */
2774 				ok_to_run_realtime_thread |= (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice);
2775 #if CONFIG_SCHED_SMT
2776 				/*
2777 				 * On Intel we want to avoid SMT secondary processors and processor 0
2778 				 * but allow them to be used as backup processors in case the preferred chosen
2779 				 * processor is delayed by interrupts or processor stalls.  So if it is
2780 				 * not ok_to_run_realtime_thread as preferred (sched_ok_to_run_realtime_thread(pset, processor, as_backup=false))
2781 				 * but ok_to_run_realtime_thread as backup (sched_ok_to_run_realtime_thread(pset, processor, as_backup=true))
2782 				 * we delay up to (timeout_count * 10us) to give the preferred processor chance
2783 				 * to grab the thread before the (current) backup processor does.
2784 				 *
2785 				 * timeout_count defaults to 5 but can be tuned using sysctl kern.sched_backup_cpu_timeout_count
2786 				 * on DEVELOPMENT || DEBUG kernels.  It is also adjusted (see above) depending on whether we want to use
2787 				 * cpu0 before secondary cpus or not.
2788 				 */
2789 				if (!ok_to_run_realtime_thread) {
2790 					if (sched_ok_to_run_realtime_thread(pset, processor, true)) {
2791 						if (timeout_count-- > 0) {
2792 							pset_unlock(pset);
2793 							thread_unlock(thread);
2794 							delay(10);
2795 							delay_count++;
2796 							thread_lock(thread);
2797 							pset_lock(pset);
2798 							goto restart;
2799 						}
2800 						ok_to_run_realtime_thread = true;
2801 					}
2802 				}
2803 #endif /* CONFIG_SCHED_SMT */
2804 				if (!ok_to_run_realtime_thread) {
2805 					idle_reason = 2;
2806 					goto send_followup_ipi_before_idle;
2807 				}
2808 			}
2809 #if CONFIG_SCHED_SMT
2810 			else if (processor->processor_primary != processor) {
2811 				/*
2812 				 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
2813 				 * we should look for work only under the same conditions that choose_processor()
2814 				 * would have assigned work, which is when all primary processors have been assigned work.
2815 				 */
2816 				if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
2817 					/* There are idle primaries */
2818 					idle_reason = 3;
2819 					goto idle;
2820 				}
2821 			}
2822 #endif /* CONFIG_SCHED_SMT */
2823 		}
2824 
2825 		/*
2826 		 *	Test to see if the current thread should continue
2827 		 *	to run on this processor.  Must not be attempting to wait, and not
2828 		 *	bound to a different processor, nor be in the wrong
2829 		 *	processor set, nor be forced to context switch by TH_SUSP.
2830 		 *
2831 		 *	Note that there are never any RT threads in the regular runqueue.
2832 		 *
2833 		 *	This code is insanely tricky.
2834 		 */
2835 
2836 		/* i.e. not waiting, not TH_SUSP'ed */
2837 		bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
2838 
2839 		/*
2840 		 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2841 		 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2842 		 *       <rdar://problem/47907700>
2843 		 *
2844 		 * A yielding thread shouldn't be forced to context switch.
2845 		 */
2846 
2847 		bool is_yielding         = (*reason & AST_YIELD) == AST_YIELD;
2848 
2849 #if CONFIG_SCHED_SMT
2850 		bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2851 #endif /* CONFIG_SCHED_SMT */
2852 
2853 		bool affinity_mismatch   = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2854 
2855 		bool bound_elsewhere     = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2856 
2857 		bool avoid_processor     = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, *reason);
2858 
2859 		bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, true);
2860 
2861 		bool current_thread_can_keep_running = (
2862 			still_running
2863 #if CONFIG_SCHED_SMT
2864 			&& !needs_smt_rebalance
2865 #endif /* CONFIG_SCHED_SMT */
2866 			&& !affinity_mismatch
2867 			&& !bound_elsewhere
2868 			&& !avoid_processor);
2869 		if (current_thread_can_keep_running) {
2870 			/*
2871 			 * This thread is eligible to keep running on this processor.
2872 			 *
2873 			 * RT threads with un-expired quantum stay on processor,
2874 			 * unless there's a valid RT thread with an earlier deadline
2875 			 * and it is still ok_to_run_realtime_thread.
2876 			 */
2877 			if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2878 				/*
2879 				 * Pick a new RT thread only if ok_to_run_realtime_thread
2880 				 * (but the current thread is allowed to complete).
2881 				 */
2882 				if (ok_to_run_realtime_thread) {
2883 					if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2884 						goto pick_new_rt_thread;
2885 					}
2886 					if (rt_runq_priority(pset) > thread->sched_pri) {
2887 						if (sched_rt_runq_strict_priority) {
2888 							/* The next RT thread is better, so pick it off the runqueue. */
2889 							goto pick_new_rt_thread;
2890 						}
2891 
2892 						/*
2893 						 * See if the current lower priority thread can continue to run without causing
2894 						 * the higher priority thread on the runq queue to miss its deadline.
2895 						 */
2896 						thread_t hi_thread = rt_runq_first(SCHED(rt_runq)(pset));
2897 						if (thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon >= hi_thread->realtime.constraint) {
2898 							/* The next RT thread is better, so pick it off the runqueue. */
2899 							goto pick_new_rt_thread;
2900 						}
2901 					} else if ((rt_runq_count(pset) > 0) && (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < thread->realtime.deadline)) {
2902 						/* The next RT thread is better, so pick it off the runqueue. */
2903 						goto pick_new_rt_thread;
2904 					}
2905 					if (other_psets_have_earlier_rt_threads_pending(pset, thread->realtime.deadline)) {
2906 						goto pick_new_rt_thread;
2907 					}
2908 				}
2909 
2910 				/* This is still the best RT thread to run. */
2911 				processor->deadline = thread->realtime.deadline;
2912 
2913 				sched_update_pset_load_average(pset, 0);
2914 
2915 				clear_pending_AST_bits(pset, processor, 1);
2916 
2917 				next_rt_processor = PROCESSOR_NULL;
2918 				next_rt_ipi_type = SCHED_IPI_NONE;
2919 
2920 				bool pset_unlocked = false;
2921 				__kdebug_only next_processor_type_t nptype = none;
2922 				if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
2923 					nptype = spill;
2924 					pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
2925 				} else if (pset_needs_a_followup_IPI(pset)) {
2926 					nptype = followup;
2927 					pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
2928 				}
2929 				if (!pset_unlocked) {
2930 					pset_unlock(pset);
2931 				}
2932 
2933 				if (next_rt_processor) {
2934 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2935 					    next_rt_processor->cpu_id, next_rt_processor->state, nptype, 2);
2936 					sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2937 				}
2938 
2939 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2940 				    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 1);
2941 				return thread;
2942 			}
2943 
2944 			if ((rt_runq_count(pset) == 0) &&
2945 			    SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2946 				/* This thread is still the highest priority runnable (non-idle) thread */
2947 				processor->deadline = RT_DEADLINE_NONE;
2948 
2949 				sched_update_pset_load_average(pset, 0);
2950 
2951 				clear_pending_AST_bits(pset, processor, 2);
2952 
2953 				pset_unlock(pset);
2954 
2955 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2956 				    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 2);
2957 				return thread;
2958 			}
2959 		} else {
2960 			/*
2961 			 * This processor must context switch.
2962 			 * If it's due to a rebalance, we should aggressively find this thread a new home.
2963 			 */
2964 			bool ast_rebalance = affinity_mismatch || bound_elsewhere || avoid_processor;
2965 #if CONFIG_SCHED_SMT
2966 			ast_rebalance = ast_rebalance || needs_smt_rebalance;
2967 #endif /* CONFIG_SCHED_SMT */
2968 			if (ast_rebalance) {
2969 				*reason |= AST_REBALANCE;
2970 			}
2971 		}
2972 
2973 #if CONFIG_SCHED_SMT
2974 		bool secondary_forced_idle = ((processor->processor_secondary != PROCESSOR_NULL) &&
2975 		    (thread_no_smt(thread) || (thread->sched_pri >= BASEPRI_RTQUEUES)) &&
2976 		    (processor->processor_secondary->state == PROCESSOR_IDLE));
2977 #endif /* CONFIG_SCHED_SMT */
2978 
2979 		/* OK, so we're not going to run the current thread. Look at the RT queue. */
2980 		if (ok_to_run_realtime_thread) {
2981 pick_new_rt_thread:
2982 			new_thread = sched_rt_choose_thread(pset);
2983 			if (new_thread != THREAD_NULL) {
2984 				processor->deadline = new_thread->realtime.deadline;
2985 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
2986 
2987 				clear_pending_AST_bits(pset, processor, 3);
2988 
2989 #if CONFIG_SCHED_SMT
2990 				if (processor->processor_secondary != NULL) {
2991 					processor_t sprocessor = processor->processor_secondary;
2992 					if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2993 						ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2994 						ast_processor = sprocessor;
2995 					}
2996 				}
2997 #endif /* CONFIG_SCHED_SMT */
2998 			}
2999 		}
3000 
3001 send_followup_ipi_before_idle:
3002 		/* This might not have been cleared if we didn't call sched_rt_choose_thread() */
3003 		if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
3004 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 5);
3005 		}
3006 		__kdebug_only next_processor_type_t nptype = none;
3007 		bool pset_unlocked = false;
3008 		if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
3009 			nptype = spill;
3010 			pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
3011 		} else if (pset_needs_a_followup_IPI(pset)) {
3012 			nptype = followup;
3013 			pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
3014 		}
3015 
3016 		assert(new_thread || !ast_processor);
3017 		if (new_thread || next_rt_processor) {
3018 			if (!pset_unlocked) {
3019 				pset_unlock(pset);
3020 				pset_unlocked = true;
3021 			}
3022 			if (ast_processor == next_rt_processor) {
3023 				ast_processor = PROCESSOR_NULL;
3024 				ipi_type = SCHED_IPI_NONE;
3025 			}
3026 
3027 			if (ast_processor) {
3028 				sched_ipi_perform(ast_processor, ipi_type);
3029 			}
3030 
3031 			if (next_rt_processor) {
3032 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
3033 				    next_rt_processor->cpu_id, next_rt_processor->state, nptype, 3);
3034 				sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
3035 			}
3036 
3037 			if (new_thread) {
3038 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3039 				    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 3);
3040 				return new_thread;
3041 			}
3042 		}
3043 
3044 		if (pset_unlocked) {
3045 			pset_lock(pset);
3046 		}
3047 
3048 		if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
3049 			/* Things changed while we dropped the lock */
3050 			goto restart;
3051 		}
3052 
3053 		if (processor->is_recommended) {
3054 			bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
3055 			if (sched_ok_to_run_realtime_thread(pset, processor, true) && (spill_pending || rt_runq_count(pset))) {
3056 				/* Things changed while we dropped the lock */
3057 				goto restart;
3058 			}
3059 
3060 #if CONFIG_SCHED_SMT
3061 			if ((processor->processor_primary != processor) && (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES)) {
3062 				/* secondary can only run realtime thread */
3063 				if (idle_reason == 0) {
3064 					idle_reason = 4;
3065 				}
3066 				goto idle;
3067 			}
3068 #endif /* CONFIG_SCHED_SMT */
3069 		} else if (!SCHED(processor_bound_count)(processor)) {
3070 			/* processor not recommended and no bound threads */
3071 			if (idle_reason == 0) {
3072 				idle_reason = 5;
3073 			}
3074 			goto idle;
3075 		}
3076 
3077 		processor->deadline = RT_DEADLINE_NONE;
3078 
3079 		/* No RT threads, so let's look at the regular threads. */
3080 		if ((new_thread = SCHED(choose_thread)(processor, MINPRI, current_thread_can_keep_running ? thread : THREAD_NULL, *reason)) != THREAD_NULL) {
3081 			if (new_thread != thread) {
3082 				/* Going to context-switch */
3083 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
3084 
3085 				clear_pending_AST_bits(pset, processor, 4);
3086 
3087 				ast_processor = PROCESSOR_NULL;
3088 				ipi_type = SCHED_IPI_NONE;
3089 
3090 #if CONFIG_SCHED_SMT
3091 				processor_t sprocessor = processor->processor_secondary;
3092 				if (sprocessor != NULL) {
3093 					if (sprocessor->state == PROCESSOR_RUNNING) {
3094 						if (thread_no_smt(new_thread)) {
3095 							ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
3096 							ast_processor = sprocessor;
3097 						}
3098 					} else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
3099 						ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_PREEMPT);
3100 						ast_processor = sprocessor;
3101 					}
3102 				}
3103 #endif /* CONFIG_SCHED_SMT */
3104 
3105 				pset_unlock(pset);
3106 
3107 				if (ast_processor) {
3108 					sched_ipi_perform(ast_processor, ipi_type);
3109 				}
3110 			} else {
3111 				/* Will continue running the current thread */
3112 				clear_pending_AST_bits(pset, processor, 4);
3113 				pset_unlock(pset);
3114 			}
3115 
3116 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3117 			    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 4);
3118 			return new_thread;
3119 		}
3120 
3121 		if (processor->must_idle) {
3122 			processor->must_idle = false;
3123 			*reason |= AST_REBALANCE;
3124 			idle_reason = 6;
3125 			goto idle;
3126 		}
3127 
3128 		if (SCHED(steal_thread_enabled)(pset)
3129 #if CONFIG_SCHED_SMT
3130 		    && (processor->processor_primary == processor)
3131 #endif /* CONFIG_SCHED_SMT */
3132 		    ) {
3133 			/*
3134 			 * No runnable threads, attempt to steal
3135 			 * from other processors. Returns with pset lock dropped.
3136 			 */
3137 
3138 			if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
3139 				pset_lock(pset);
3140 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
3141 				if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
3142 					/*
3143 					 * A realtime thread chose this processor while it was DISPATCHING
3144 					 * and the pset lock was dropped
3145 					 */
3146 					ast_on(AST_URGENT | AST_PREEMPT);
3147 				}
3148 
3149 				clear_pending_AST_bits(pset, processor, 5);
3150 
3151 				pset_unlock(pset);
3152 
3153 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3154 				    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 5);
3155 				return new_thread;
3156 			}
3157 
3158 			/*
3159 			 * If other threads have appeared, shortcut
3160 			 * around again.
3161 			 */
3162 			if (SCHED(processor_bound_count)(processor)) {
3163 				continue;
3164 			}
3165 			if (processor->is_recommended) {
3166 				if (!SCHED(processor_queue_empty)(processor) || (sched_ok_to_run_realtime_thread(pset, processor, true) && (rt_runq_count(pset) > 0))) {
3167 					continue;
3168 				}
3169 			}
3170 
3171 			pset_lock(pset);
3172 		}
3173 
3174 idle:
3175 		/* Someone selected this processor while we had dropped the lock */
3176 		if ((!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) ||
3177 		    (!pending_AST_PREEMPT && bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id))) {
3178 			goto restart;
3179 		}
3180 
3181 		if ((idle_reason == 0) && current_thread_can_keep_running) {
3182 			/* This thread is the only runnable (non-idle) thread */
3183 			if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3184 				processor->deadline = thread->realtime.deadline;
3185 			} else {
3186 				processor->deadline = RT_DEADLINE_NONE;
3187 			}
3188 
3189 			sched_update_pset_load_average(pset, 0);
3190 
3191 			clear_pending_AST_bits(pset, processor, 6);
3192 
3193 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3194 			    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 6);
3195 			pset_unlock(pset);
3196 			return thread;
3197 		}
3198 
3199 		/*
3200 		 *	Nothing is runnable, or this processor must be forced idle,
3201 		 *	so set this processor idle if it was running.
3202 		 */
3203 		if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
3204 			pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
3205 			processor_state_update_idle(processor);
3206 		}
3207 		pset_update_rt_stealable_state(pset);
3208 
3209 		clear_pending_AST_bits(pset, processor, 7);
3210 
3211 		/* Invoked with pset locked, returns with pset unlocked */
3212 		processor->next_idle_short = SCHED(processor_balance)(processor, pset);
3213 
3214 		new_thread = processor->idle_thread;
3215 	} while (new_thread == THREAD_NULL);
3216 
3217 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3218 	    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 10 + idle_reason);
3219 	return new_thread;
3220 }
3221 
3222 /*
3223  * thread_invoke
3224  *
3225  * Called at splsched with neither thread locked.
3226  *
3227  * Perform a context switch and start executing the new thread.
3228  *
3229  * Returns FALSE when the context switch didn't happen.
3230  * The reference to the new thread is still consumed.
3231  *
3232  * "self" is what is currently running on the processor,
3233  * "thread" is the new thread to context switch to
3234  * (which may be the same thread in some cases)
3235  */
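/*
 * Caller-side sketch of the contract above (illustrative, not part of the
 * build): when thread_invoke() returns FALSE the switch did not happen, and
 * the caller must select another thread and retry, exactly as
 * thread_block_reason() does below.
 */
#if 0
	do {
		thread_lock(self);
		new_thread = thread_select(self, processor, &reason);
		thread_unlock(self);
	} while (!thread_invoke(self, new_thread, reason));
#endif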
3236 static boolean_t
3237 thread_invoke(
3238 	thread_t                        self,
3239 	thread_t                        thread,
3240 	ast_t                           reason)
3241 {
3242 	if (__improbable(get_preemption_level() != 0)) {
3243 		int pl = get_preemption_level();
3244 		panic("thread_invoke: preemption_level %d, possible cause: %s",
3245 		    pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
3246 		    "blocking while holding a spinlock, or within interrupt context"));
3247 	}
3248 
3249 	thread_continue_t       continuation = self->continuation;
3250 	void                    *parameter   = self->parameter;
3251 
3252 	struct recount_snap snap = { 0 };
3253 	recount_snapshot(&snap);
3254 	uint64_t ctime = snap.rsn_time_mach;
3255 
3256 	check_monotonic_time(ctime);
3257 
3258 #ifdef CONFIG_MACH_APPROXIMATE_TIME
3259 	commpage_update_mach_approximate_time(ctime);
3260 #endif
3261 
3262 	if (ctime < thread->last_made_runnable_time) {
3263 		panic("Non-monotonic time: invoke at 0x%llx, runnable at 0x%llx",
3264 		    ctime, thread->last_made_runnable_time);
3265 	}
3266 
3267 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3268 	if (!((thread->state & TH_IDLE) != 0 ||
3269 	    ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
3270 		sched_timeshare_consider_maintenance(ctime, true);
3271 	}
3272 #endif
3273 
3274 	recount_log_switch_thread(&snap);
3275 
3276 	processor_t processor = current_processor();
3277 
3278 	if (!processor->processor_online) {
3279 		panic("Invalid attempt to context switch an offline processor");
3280 	}
3281 
3282 	assert_thread_magic(self);
3283 	assert(self == current_thread());
3284 	thread_assert_runq_null(self);
3285 	assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
3286 
3287 	thread_lock(thread);
3288 
3289 	assert_thread_magic(thread);
3290 	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
3291 	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor);
3292 	thread_assert_runq_null(thread);
3293 
3294 	/* Update SFI class based on other factors */
3295 	thread->sfi_class = sfi_thread_classify(thread);
3296 
3297 	/* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
3298 	thread->same_pri_latency = ctime - thread->last_basepri_change_time;
3299 	/*
3300 	 * In case a base_pri update happened between the timestamp and
3301 	 * taking the thread lock
3302 	 */
3303 	if (ctime <= thread->last_basepri_change_time) {
3304 		thread->same_pri_latency = ctime - thread->last_made_runnable_time;
3305 	}
3306 
3307 	/* Allow realtime threads to hang onto a stack. */
3308 	if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
3309 		self->reserved_stack = self->kernel_stack;
3310 	}
3311 
3312 	/* Prepare for spin debugging */
3313 #if SCHED_HYGIENE_DEBUG
3314 	ml_spin_debug_clear(thread);
3315 #endif
3316 
3317 	if (continuation != NULL) {
3318 		if (!thread->kernel_stack) {
3319 			/*
3320 			 * If we are using a privileged stack,
3321 			 * check to see whether we can exchange it with
3322 			 * that of the other thread.
3323 			 */
3324 			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
3325 				goto need_stack;
3326 			}
3327 
3328 			/*
3329 			 * Context switch by performing a stack handoff.
3330 			 * Requires both threads to be parked in a continuation.
3331 			 */
3332 			continuation = thread->continuation;
3333 			parameter = thread->parameter;
3334 
3335 			processor->active_thread = thread;
3336 			processor_state_update_from_thread(processor, thread, false);
3337 
3338 			if (thread->last_processor != processor && thread->last_processor != NULL) {
3339 				if (thread->last_processor->processor_set != processor->processor_set) {
3340 					thread->ps_switch++;
3341 				}
3342 				thread->p_switch++;
3343 			}
3344 			thread->last_processor = processor;
3345 			thread->c_switch++;
3346 			ast_context(thread);
3347 
3348 			thread_unlock(thread);
3349 
3350 			self->reason = reason;
3351 
3352 			processor->last_dispatch = ctime;
3353 			self->last_run_time = ctime;
3354 			timer_update(&thread->runnable_timer, ctime);
3355 			recount_switch_thread(&snap, self, get_threadtask(self));
3356 
3357 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3358 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
3359 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3360 
3361 			if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
3362 				SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3363 				    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3364 			}
3365 
3366 			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3367 
3368 			SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3369 
3370 #if KPERF
3371 			kperf_off_cpu(self);
3372 #endif /* KPERF */
3373 
3374 			/*
3375 			 * This is where we actually switch thread identity,
3376 			 * and address space if required.  However, register
3377 			 * state is not switched - this routine leaves the
3378 			 * stack and register state active on the current CPU.
3379 			 */
3380 			TLOG(1, "thread_invoke: calling stack_handoff\n");
3381 			stack_handoff(self, thread);
3382 
3383 			/* 'self' is now off core */
3384 			assert(thread == current_thread_volatile());
3385 
3386 			DTRACE_SCHED(on__cpu);
3387 
3388 #if KPERF
3389 			kperf_on_cpu(thread, continuation, NULL);
3390 #endif /* KPERF */
3391 
3392 
3393 			recount_log_switch_thread_on(&snap);
3394 
3395 			thread_dispatch(self, thread);
3396 
3397 #if KASAN
3398 			/* Old thread's stack has been moved to the new thread, so explicitly
3399 			 * unpoison it. */
3400 			kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3401 #endif
3402 
3403 			thread->continuation = thread->parameter = NULL;
3404 
3405 			boolean_t enable_interrupts = TRUE;
3406 
3407 			/* idle thread needs to stay interrupts-disabled */
3408 			if ((thread->state & TH_IDLE)) {
3409 				enable_interrupts = FALSE;
3410 			}
3411 
3412 			assert(continuation);
3413 			call_continuation(continuation, parameter,
3414 			    thread->wait_result, enable_interrupts);
3415 			/*NOTREACHED*/
3416 		} else if (thread == self) {
3417 			/* same thread but with continuation */
3418 			ast_context(self);
3419 
3420 			thread_unlock(self);
3421 
3422 #if KPERF
3423 			kperf_on_cpu(thread, continuation, NULL);
3424 #endif /* KPERF */
3425 
3426 			recount_log_switch_thread_on(&snap);
3427 
3428 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3429 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3430 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3431 
3432 #if KASAN
3433 			/* stack handoff to self - no thread_dispatch(), so clear the stack
3434 			 * and free the fakestack directly */
3435 #if KASAN_CLASSIC
3436 			kasan_fakestack_drop(self);
3437 			kasan_fakestack_gc(self);
3438 #endif /* KASAN_CLASSIC */
3439 			kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3440 #endif /* KASAN */
3441 
3442 			self->continuation = self->parameter = NULL;
3443 
3444 			boolean_t enable_interrupts = TRUE;
3445 
3446 			/* idle thread needs to stay interrupts-disabled */
3447 			if ((self->state & TH_IDLE)) {
3448 				enable_interrupts = FALSE;
3449 			}
3450 
3451 			call_continuation(continuation, parameter,
3452 			    self->wait_result, enable_interrupts);
3453 			/*NOTREACHED*/
3454 		}
3455 	} else {
3456 		/*
3457 		 * Check that the other thread has a stack
3458 		 */
3459 		if (!thread->kernel_stack) {
3460 need_stack:
3461 			if (!stack_alloc_try(thread)) {
3462 				thread_unlock(thread);
3463 				thread_stack_enqueue(thread);
3464 				return FALSE;
3465 			}
3466 		} else if (thread == self) {
3467 			ast_context(self);
3468 			thread_unlock(self);
3469 
3470 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3471 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3472 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3473 
3474 			return TRUE;
3475 		}
3476 	}
3477 
3478 	/*
3479 	 * Context switch by full context save.
3480 	 */
3481 	processor->active_thread = thread;
3482 	processor_state_update_from_thread(processor, thread, false);
3483 
3484 	if (thread->last_processor != processor && thread->last_processor != NULL) {
3485 		if (thread->last_processor->processor_set != processor->processor_set) {
3486 			thread->ps_switch++;
3487 		}
3488 		thread->p_switch++;
3489 	}
3490 	thread->last_processor = processor;
3491 	thread->c_switch++;
3492 	ast_context(thread);
3493 
3494 	thread_unlock(thread);
3495 
3496 	self->reason = reason;
3497 
3498 	processor->last_dispatch = ctime;
3499 	self->last_run_time = ctime;
3500 	timer_update(&thread->runnable_timer, ctime);
3501 	recount_switch_thread(&snap, self, get_threadtask(self));
3502 
3503 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3504 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3505 	    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3506 
3507 	if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
3508 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3509 		    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3510 	}
3511 
3512 	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3513 
3514 	SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3515 
3516 #if KPERF
3517 	kperf_off_cpu(self);
3518 #endif /* KPERF */
3519 
3520 	/*
3521 	 * This is where we actually switch register context,
3522 	 * and address space if required.  We will next run
3523 	 * as a result of a subsequent context switch.
3524 	 *
3525 	 * Once registers are switched and the processor is running "thread",
3526 	 * the stack variables and non-volatile registers will contain whatever
3527 	 * was there the last time that thread blocked. No local variables should
3528 	 * be used after this point, except for the special case of "thread", which
3529 	 * the platform layer returns as the previous thread running on the processor
3530 	 * via the function call ABI as a return register, and "self", which may have
3531 	 * been stored on the stack or a non-volatile register, but a stale idea of
3532 	 * what was on the CPU is newly-accurate because that thread is again
3533 	 * running on the CPU.
3534 	 *
3535 	 * If one of the threads is using a continuation, thread_continue
3536 	 * is used to stitch up its context.
3537 	 *
3538 	 * If we are invoking a thread which is resuming from a continuation,
3539 	 * the CPU will invoke thread_continue next.
3540 	 *
3541 	 * If the current thread is parking in a continuation, then its state
3542 	 * won't be saved and the stack will be discarded. When the stack is
3543 	 * re-allocated, it will be configured to resume from thread_continue.
3544 	 */
3545 
3546 	assert(continuation == self->continuation);
3547 	thread = machine_switch_context(self, continuation, thread);
3548 	assert(self == current_thread_volatile());
3549 	TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
3550 
3551 	assert(continuation == NULL && self->continuation == NULL);
3552 
3553 	DTRACE_SCHED(on__cpu);
3554 
3555 #if KPERF
3556 	kperf_on_cpu(self, NULL, __builtin_frame_address(0));
3557 #endif /* KPERF */
3558 
3559 
3560 	/* Previous snap on the old stack is gone. */
3561 	recount_log_switch_thread_on(NULL);
3562 
3563 	/* We have been resumed and are set to run. */
3564 	thread_dispatch(thread, self);
3565 
3566 	return TRUE;
3567 }
3568 
3569 #if defined(CONFIG_SCHED_DEFERRED_AST)
3570 /*
3571  *	pset_cancel_deferred_dispatch:
3572  *
3573  *	Cancels all ASTs that we can cancel for the given processor set
3574  *	if the current processor is running the last runnable thread in the
3575  *	system.
3576  *
3577  *	This function assumes the current thread is runnable.  This must
3578  *	be called with the pset unlocked.
3579  */
3580 static void
3581 pset_cancel_deferred_dispatch(
3582 	processor_set_t         pset,
3583 	processor_t             processor)
3584 {
3585 	processor_t             active_processor = NULL;
3586 	uint32_t                sampled_sched_run_count;
3587 
3588 	pset_lock(pset);
3589 	sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
3590 
3591 	/*
3592 	 * If we have emptied the run queue, and our current thread is runnable, we
3593 	 * should tell any processors that are still DISPATCHING that they will
3594 	 * probably not have any work to do.  In the event that there are no
3595 	 * pending signals that we can cancel, this is also uninteresting.
3596 	 *
3597 	 * In the unlikely event that another thread becomes runnable while we are
3598 	 * doing this (sched_run_count is atomically updated, not guarded), the
3599 	 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
3600 	 * in order to dispatch it to a processor in our pset.  So, the other
3601 	 * codepath will wait while we squash all cancelable ASTs, get the pset
3602 	 * lock, and then dispatch the freshly runnable thread.  So this should be
3603 	 * correct (we won't accidentally have a runnable thread that hasn't been
3604 	 * dispatched to an idle processor), if not ideal (we may be restarting the
3605 	 * dispatch process, which could have some overhead).
3606 	 */
3607 
3608 	if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
3609 		uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
3610 		    pset->pending_deferred_AST_cpu_mask &
3611 		    ~pset->pending_AST_URGENT_cpu_mask);
3612 		for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
3613 			active_processor = processor_array[cpuid];
3614 			/*
3615 			 * If a processor is DISPATCHING, it could be because of
3616 			 * a cancelable signal.
3617 			 *
3618 			 * IF the processor is not our
3619 			 * current processor (the current processor should not
3620 			 * be DISPATCHING, so this is a bit paranoid), AND there
3621 			 * is a cancelable signal pending on the processor, AND
3622 			 * there is no non-cancelable signal pending (as there is
3623 			 * no point trying to backtrack on bringing the processor
3624 			 * up if a signal we cannot cancel is outstanding), THEN
3625 			 * it should make sense to roll back the processor state
3626 			 * to the IDLE state.
3627 			 *
3628 			 * If the racy nature of this approach (as the signal
3629 			 * will be arbitrated by hardware, and can fire as we
3630 			 * roll back state) results in the core responding
3631 			 * despite being pushed back to the IDLE state, it
3632 			 * should be no different than if the core took some
3633 			 * interrupt while IDLE.
3634 			 */
3635 			if (active_processor != processor) {
3636 				/*
3637 				 * Squash all of the processor state back to some
3638 				 * reasonable facsimile of PROCESSOR_IDLE.
3639 				 */
3640 
3641 				processor_state_update_idle(active_processor);
3642 				active_processor->deadline = RT_DEADLINE_NONE;
3643 				pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
3644 				bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3645 				machine_signal_idle_cancel(active_processor);
3646 			}
3647 		}
3648 	}
3649 
3650 	pset_unlock(pset);
3651 }
3652 #else
3653 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
3654 #endif
3655 
3656 static void
3657 thread_csw_callout(
3658 	thread_t            old,
3659 	thread_t            new,
3660 	uint64_t            timestamp)
3661 {
3662 	perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
3663 	uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
3664 	machine_switch_perfcontrol_context(event, timestamp, 0,
3665 	    same_pri_latency, old, new);
3666 }
3667 
3668 
3669 /*
3670  *	thread_dispatch:
3671  *
3672  *	Handle threads at context switch.  Re-dispatch other thread
3673  *	if still running, otherwise update run state and perform
3674  *	special actions.  Update quantum for other thread and begin
3675  *	the quantum for ourselves.
3676  *
3677  *      "thread" is the old thread that we have switched away from.
3678  *      "self" is the new current thread that we have context switched to
3679  *
3680  *	Called at splsched.
3681  *
3682  */
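/*
 * Condensed sketch of the decision tree implemented below (common paths
 * only; special cases are handled inline):
 *
 *	thread->state & TH_IDLE      -> nothing to bill or requeue
 *	!(thread->state & TH_WAIT)   -> still runnable: bill the quantum,
 *	                                then thread_setrun(thread, options)
 *	otherwise                    -> waiting: clear TH_RUN, stop the
 *	                                runnable timer, and enqueue for
 *	                                termination if TH_TERMINATE was set
 */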
3683 void
3684 thread_dispatch(
3685 	thread_t                thread,
3686 	thread_t                self)
3687 {
3688 	processor_t             processor = self->last_processor;
3689 	bool was_idle = false;
3690 	bool processor_bootstrap = (thread == THREAD_NULL);
3691 
3692 	assert(processor == current_processor());
3693 	assert(self == current_thread_volatile());
3694 	assert(thread != self);
3695 
3696 	if (thread != THREAD_NULL) {
3697 		/*
3698 		 * Do the perfcontrol callout for context switch.
3699 		 * The reason we do this here is:
3700 		 * - thread_dispatch() is called from various places that are not
3701 		 *   the direct context switch path, e.g. processor shutdown.
3702 		 *   Adding the callout here covers all those cases.
3703 		 * - We want this callout as early as possible to be close
3704 		 *   to the timestamp taken in thread_invoke()
3705 		 * - We want to avoid holding the thread lock while doing the
3706 		 *   callout
3707 		 * - We do not want to callout if "thread" is NULL.
3708 		 */
3709 		thread_csw_callout(thread, self, processor->last_dispatch);
3710 
3711 #if KASAN
3712 		if (thread->continuation != NULL) {
3713 			/*
3714 			 * Thread has a continuation and the normal stack is going away.
3715 			 * Unpoison the stack and mark all fakestack objects as unused.
3716 			 */
3717 #if KASAN_CLASSIC
3718 			kasan_fakestack_drop(thread);
3719 #endif /* KASAN_CLASSIC */
3720 			if (thread->kernel_stack) {
3721 				kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3722 			}
3723 		}
3724 
3725 
3726 #if KASAN_CLASSIC
3727 		/*
3728 		 * Free all unused fakestack objects.
3729 		 */
3730 		kasan_fakestack_gc(thread);
3731 #endif /* KASAN_CLASSIC */
3732 #endif /* KASAN */
3733 
3734 		/*
3735 		 *	If blocked at a continuation, discard
3736 		 *	the stack.
3737 		 */
3738 		if (thread->continuation != NULL && thread->kernel_stack != 0) {
3739 			stack_free(thread);
3740 		}
3741 
3742 		if (thread->state & TH_IDLE) {
3743 			was_idle = true;
3744 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3745 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3746 			    (uintptr_t)thread_tid(thread), 0, thread->state,
3747 			    sched_run_buckets[TH_BUCKET_RUN], 0);
3748 		} else {
3749 			int64_t consumed;
3750 			int64_t remainder = 0;
3751 
3752 			if (processor->quantum_end > processor->last_dispatch) {
3753 				remainder = processor->quantum_end -
3754 				    processor->last_dispatch;
3755 			}
3756 
3757 			consumed = thread->quantum_remaining - remainder;
3758 
3759 			if ((thread->reason & AST_LEDGER) == 0) {
3760 				/*
3761 				 * Bill CPU time to both the task and
3762 				 * the individual thread.
3763 				 */
3764 				ledger_credit_thread(thread, thread->t_ledger,
3765 				    task_ledgers.cpu_time, consumed);
3766 				ledger_credit_thread(thread, thread->t_threadledger,
3767 				    thread_ledgers.cpu_time, consumed);
3768 				if (thread->t_bankledger) {
3769 					ledger_credit_thread(thread, thread->t_bankledger,
3770 					    bank_ledgers.cpu_time,
3771 					    (consumed - thread->t_deduct_bank_ledger_time));
3772 				}
3773 				thread->t_deduct_bank_ledger_time = 0;
3774 				if (consumed > 0) {
3775 					/*
3776 					 * This should never be negative, but in traces we are seeing some instances
3777 					 * of consumed being negative.
3778 					 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
3779 					 */
3780 					sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
3781 				}
3782 			}
3783 
3784 			/* For the thread we just context switched away from, figure
3785 			 * out whether it has expired its wq quantum, and set the AST if so
3786 			 */
3787 			if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
3788 				thread_evaluate_workqueue_quantum_expiry(thread);
3789 			}
3790 
3791 			if (__improbable(thread->rwlock_count != 0)) {
3792 				smr_mark_active_trackers_stalled(thread);
3793 			}
3794 
3795 			/*
3796 			 * Pairs with task_restartable_ranges_synchronize
3797 			 */
3798 			wake_lock(thread);
3799 			thread_lock(thread);
3800 
3801 			/*
3802 			 * Same as ast_check(), in case we missed the IPI
3803 			 */
3804 			thread_reset_pcs_ack_IPI(thread);
3805 
3806 			/*
3807 			 * Apply a priority floor if the thread holds a kernel resource
3808 			 * or explicitly requested it.
3809 			 * Do this before checking starting_pri to avoid overpenalizing
3810 			 * repeated rwlock blockers.
3811 			 */
3812 			if (__improbable(thread->rwlock_count != 0)) {
3813 				lck_rw_set_promotion_locked(thread);
3814 			}
3815 			if (__improbable(thread->priority_floor_count != 0)) {
3816 				thread_floor_boost_set_promotion_locked(thread);
3817 			}
3818 
3819 			boolean_t keep_quantum = processor->first_timeslice;
3820 
3821 			/*
3822 			 * Treat a thread which has dropped priority since it got on core
3823 			 * as having expired its quantum.
3824 			 */
3825 			if (processor->starting_pri > thread->sched_pri) {
3826 				keep_quantum = FALSE;
3827 			}
3828 
3829 			/* Compute remainder of current quantum. */
3830 			if (keep_quantum &&
3831 			    processor->quantum_end > processor->last_dispatch) {
3832 				thread->quantum_remaining = (uint32_t)remainder;
3833 			} else {
3834 				thread->quantum_remaining = 0;
3835 			}
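			/*
			 * Worked example (illustrative numbers): a thread dispatched
			 * with quantum_remaining = 10ms has quantum_end set 10ms past
			 * last_dispatch. If it switches out 6ms in, remainder = 4ms and
			 * consumed = 6ms (billed to the ledgers above). If it kept its
			 * timeslice and did not drop priority, it resumes later with
			 * 4ms remaining; otherwise quantum_remaining becomes 0 and a
			 * fresh quantum is granted at its next dispatch.
			 */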
3836 
3837 			if (thread->sched_mode == TH_MODE_REALTIME) {
3838 				/*
3839 				 *	Cancel the deadline if the thread has
3840 				 *	consumed the entire quantum.
3841 				 */
3842 				if (thread->quantum_remaining == 0) {
3843 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
3844 					    (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
3845 					thread->realtime.deadline = RT_DEADLINE_QUANTUM_EXPIRED;
3846 				}
3847 			} else {
3848 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3849 				/*
3850 				 *	For non-realtime threads treat a tiny
3851 				 *	remaining quantum as an expired quantum
3852 				 *	but include what's left next time.
3853 				 */
3854 				if (thread->quantum_remaining < min_std_quantum) {
3855 					thread->reason |= AST_QUANTUM;
3856 					thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
3857 				}
3858 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3859 			}
3860 
3861 			/*
3862 			 *	If we are doing a direct handoff then
3863 			 *	take the remainder of the quantum.
3864 			 */
3865 			if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
3866 				self->quantum_remaining = thread->quantum_remaining;
3867 				thread->reason |= AST_QUANTUM;
3868 				thread->quantum_remaining = 0;
3869 			}
3870 
3871 			thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
3872 
3873 			if (!(thread->state & TH_WAIT)) {
3874 				/*
3875 				 *	Still runnable.
3876 				 */
3877 				thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3878 
3879 				machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
3880 
3881 				ast_t reason = thread->reason;
3882 				sched_options_t options = SCHED_NONE;
3883 
3884 				if (reason & AST_REBALANCE) {
3885 					options |= SCHED_REBALANCE;
3886 					if (reason & AST_QUANTUM) {
3887 						/*
3888 						 * Having gone to the trouble of forcing this thread off a less preferred core,
3889 						 * we should force the preferable core to reschedule immediately to give this
3890 						 * thread a chance to run instead of just sitting on the run queue where
3891 						 * it may just be stolen back by the idle core we just forced it off.
3892 						 * But only do this at the end of a quantum to prevent cascading effects.
3893 						 */
3894 						options |= SCHED_PREEMPT;
3895 					}
3896 				}
3897 
3898 				if (reason & AST_QUANTUM) {
3899 					options |= SCHED_TAILQ;
3900 				} else if (reason & AST_PREEMPT) {
3901 					options |= SCHED_HEADQ;
3902 				} else {
3903 					options |= (SCHED_PREEMPT | SCHED_TAILQ);
3904 				}
3905 
3906 				thread_setrun(thread, options);
3907 
3908 				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3909 				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3910 				    (uintptr_t)thread_tid(thread), thread->reason, thread->state,
3911 				    sched_run_buckets[TH_BUCKET_RUN], 0);
3912 
3913 				if (thread->wake_active) {
3914 					thread->wake_active = FALSE;
3915 					thread_unlock(thread);
3916 
3917 					thread_wakeup(&thread->wake_active);
3918 				} else {
3919 					thread_unlock(thread);
3920 				}
3921 
3922 				wake_unlock(thread);
3923 			} else {
3924 				/*
3925 				 *	Waiting.
3926 				 */
3927 				boolean_t should_terminate = FALSE;
3928 				uint32_t new_run_count;
3929 				int thread_state = thread->state;
3930 
3931 				/* Only the first call to thread_dispatch
3932 				 * after explicit termination should add
3933 				 * the thread to the termination queue
3934 				 */
3935 				if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
3936 					should_terminate = TRUE;
3937 					thread_state |= TH_TERMINATE2;
3938 				}
3939 
3940 				timer_stop(&thread->runnable_timer, processor->last_dispatch);
3941 
3942 				thread_state &= ~TH_RUN;
3943 				thread->state = thread_state;
3944 
3945 				thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
3946 				thread->chosen_processor = PROCESSOR_NULL;
3947 
3948 				new_run_count = SCHED(run_count_decr)(thread);
3949 
3950 #if CONFIG_SCHED_AUTO_JOIN
3951 				if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
3952 					work_interval_auto_join_unwind(thread);
3953 				}
3954 #endif /* CONFIG_SCHED_AUTO_JOIN */
3955 
3956 #if CONFIG_SCHED_SFI
3957 				if (thread->reason & AST_SFI) {
3958 					thread->wait_sfi_begin_time = processor->last_dispatch;
3959 				}
3960 #endif
3961 				machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
3962 
3963 				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3964 				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3965 				    (uintptr_t)thread_tid(thread), thread->reason, thread_state,
3966 				    new_run_count, 0);
3967 
3968 				if (thread_state & TH_WAIT_REPORT) {
3969 					(*thread->sched_call)(SCHED_CALL_BLOCK, thread);
3970 				}
3971 
3972 				if (thread->wake_active) {
3973 					thread->wake_active = FALSE;
3974 					thread_unlock(thread);
3975 
3976 					thread_wakeup(&thread->wake_active);
3977 				} else {
3978 					thread_unlock(thread);
3979 				}
3980 
3981 				wake_unlock(thread);
3982 
3983 				if (should_terminate) {
3984 					thread_terminate_enqueue(thread);
3985 				}
3986 			}
3987 		}
3988 		/*
3989 		 * The thread could have been added to the termination queue, so it's
3990 		 * unsafe to use after this point.
3991 		 */
3992 		thread = THREAD_NULL;
3993 	}
3994 
3995 	int urgency = THREAD_URGENCY_NONE;
3996 	uint64_t latency = 0;
3997 
3998 	/* Update (new) current thread and reprogram running timers */
3999 	thread_lock(self);
4000 
4001 	if (!(self->state & TH_IDLE)) {
4002 		uint64_t        arg1, arg2;
4003 
4004 #if CONFIG_SCHED_SFI
4005 		ast_t                   new_ast;
4006 
4007 		new_ast = sfi_thread_needs_ast(self, NULL);
4008 
4009 		if (new_ast != AST_NONE) {
4010 			ast_on(new_ast);
4011 		}
4012 #endif
4013 
4014 		if (processor->last_dispatch < self->last_made_runnable_time) {
4015 			panic("Non-monotonic time: dispatch at 0x%llx, runnable at 0x%llx",
4016 			    processor->last_dispatch, self->last_made_runnable_time);
4017 		}
4018 
4019 		assert(self->last_made_runnable_time <= self->last_basepri_change_time);
4020 
4021 		latency = processor->last_dispatch - self->last_made_runnable_time;
4022 		assert(latency >= self->same_pri_latency);
4023 
4024 		urgency = thread_get_urgency(self, &arg1, &arg2);
4025 
4026 		thread_tell_urgency(urgency, arg1, arg2, latency, self);
4027 
4028 		/*
4029 		 *	Start a new CPU limit interval if the previous one has
4030 		 *	expired. This should happen before initializing a new
4031 		 *	quantum.
4032 		 */
4033 		if (cpulimit_affects_quantum &&
4034 		    thread_cpulimit_interval_has_expired(processor->last_dispatch)) {
4035 			thread_cpulimit_restart(processor->last_dispatch);
4036 		}
4037 
4038 		/*
4039 		 *	Get a new quantum if none remaining.
4040 		 */
4041 		if (self->quantum_remaining == 0) {
4042 			thread_quantum_init(self, processor->last_dispatch);
4043 		}
4044 
4045 		/*
4046 		 *	Set up quantum timer and timeslice.
4047 		 */
4048 		processor->quantum_end = processor->last_dispatch +
4049 		    self->quantum_remaining;
4050 
4051 		running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
4052 		    processor->quantum_end, processor->last_dispatch);
4053 		if (was_idle) {
4054 			/*
4055 			 * kperf's running timer is active whenever the idle thread for a
4056 			 * CPU is not running.
4057 			 */
4058 			kperf_running_setup(processor, processor->last_dispatch);
4059 		}
4060 		running_timers_activate(processor);
4061 		processor->first_timeslice = TRUE;
4062 	} else {
4063 		if (!processor_bootstrap) {
4064 			running_timers_deactivate(processor);
4065 		}
4066 		processor->first_timeslice = FALSE;
4067 		thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
4068 	}
4069 
4070 	assert(self->block_hint == kThreadWaitNone);
4071 	self->computation_epoch = processor->last_dispatch;
4072 	/*
4073 	 * This relies on the interrupt time being tallied up to the thread in the
4074 	 * exception handler epilogue, which is before AST context where preemption
4075 	 * is considered (and the scheduler is potentially invoked to
4076 	 * context switch, here).
4077 	 */
4078 	self->computation_interrupt_epoch = recount_current_thread_interrupt_time_mach();
4079 	self->reason = AST_NONE;
4080 	processor->starting_pri = self->sched_pri;
4081 
4082 	thread_unlock(self);
4083 
4084 	machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
4085 	    processor->last_dispatch);
4086 
4087 #if defined(CONFIG_SCHED_DEFERRED_AST)
4088 	/*
4089 	 * TODO: Can we state that redispatching our old thread is also
4090 	 * uninteresting?
4091 	 */
4092 	if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
4093 		pset_cancel_deferred_dispatch(processor->processor_set, processor);
4094 	}
4095 #endif
4096 }
4097 
4098 /*
4099  *	thread_block_reason:
4100  *
4101  *	Forces a reschedule, blocking the caller if a wait
4102  *	has been asserted.
4103  *
4104  *	If a continuation is specified, then thread_invoke will
4105  *	attempt to discard the thread's kernel stack.  When the
4106  *	thread resumes, it will execute the continuation function
4107  *	on a new kernel stack.
4108  */
4109 wait_result_t
4110 thread_block_reason(
4111 	thread_continue_t       continuation,
4112 	void                            *parameter,
4113 	ast_t                           reason)
4114 {
4115 	thread_t        self = current_thread();
4116 	processor_t     processor;
4117 	thread_t        new_thread;
4118 	spl_t           s;
4119 
4120 	s = splsched();
4121 
4122 	processor = current_processor();
4123 
4124 	/* If we're explicitly yielding, force a subsequent quantum */
4125 	if (reason & AST_YIELD) {
4126 		processor->first_timeslice = FALSE;
4127 	}
4128 
4129 	/* We're handling all scheduling AST's */
4130 	ast_off(AST_SCHEDULING);
4131 
4132 	clear_pending_nonurgent_preemption(processor);
4133 
4134 #if PROC_REF_DEBUG
4135 	if ((continuation != NULL) && (get_threadtask(self) != kernel_task)) {
4136 		uthread_assert_zero_proc_refcount(get_bsdthread_info(self));
4137 	}
4138 #endif
4139 
4140 #if CONFIG_EXCLAVES
4141 	if (continuation != NULL) {
4142 		assert3u(self->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
4143 	}
4144 #endif /* CONFIG_EXCLAVES */
4145 
4146 	self->continuation = continuation;
4147 	self->parameter = parameter;
4148 
4149 	if (self->state & ~(TH_RUN | TH_IDLE)) {
4150 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4151 		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
4152 		    reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
4153 	}
4154 
4155 	do {
4156 		thread_lock(self);
4157 		new_thread = thread_select(self, processor, &reason);
4158 		thread_unlock(self);
4159 	} while (!thread_invoke(self, new_thread, reason));
4160 
4161 	splx(s);
4162 
4163 	return self->wait_result;
4164 }
4165 
4166 /*
4167  *	thread_block:
4168  *
4169  *	Block the current thread if a wait has been asserted.
4170  */
4171 wait_result_t
4172 thread_block(
4173 	thread_continue_t       continuation)
4174 {
4175 	return thread_block_reason(continuation, NULL, AST_NONE);
4176 }
4177 
4178 wait_result_t
4179 thread_block_parameter(
4180 	thread_continue_t       continuation,
4181 	void                            *parameter)
4182 {
4183 	return thread_block_reason(continuation, parameter, AST_NONE);
4184 }
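/*
 * Usage sketch (hypothetical caller; my_continuation, some_event and
 * some_parameter are made-up names): the classic assert_wait()/
 * thread_block() pattern with a continuation. Because the kernel stack may
 * be discarded while blocked, the continuation must not rely on stack state
 * from before the block; it receives only its parameter and the wait
 * result, on a fresh stack via thread_continue().
 */
#if 0
static void
my_continuation(void *parameter, wait_result_t wresult)
{
	/* resumes here; the pre-block stack frame is gone */
}

	assert_wait(&some_event, THREAD_UNINT);
	thread_block_parameter(my_continuation, some_parameter);
	/* NOTREACHED if the wait was asserted */
#endif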
4185 
4186 /*
4187  *	thread_run:
4188  *
4189  *	Switch directly from the current thread to the
4190  *	new thread, handing off our quantum if appropriate.
4191  *
4192  *	New thread must be runnable, and not on a run queue.
4193  *
4194  *	Called at splsched.
4195  */
4196 int
4197 thread_run(
4198 	thread_t                        self,
4199 	thread_continue_t       continuation,
4200 	void                            *parameter,
4201 	thread_t                        new_thread)
4202 {
4203 	ast_t reason = AST_NONE;
4204 
4205 	if ((self->state & TH_IDLE) == 0) {
4206 		reason = AST_HANDOFF;
4207 	}
4208 
4209 	/* Must not get here without a chosen processor */
4210 	assert(new_thread->chosen_processor);
4211 
4212 	self->continuation = continuation;
4213 	self->parameter = parameter;
4214 
4215 	while (!thread_invoke(self, new_thread, reason)) {
4216 		/* the handoff failed, so we have to fall back to the normal block path */
4217 		processor_t processor = current_processor();
4218 
4219 		reason = AST_NONE;
4220 
4221 		thread_lock(self);
4222 		new_thread = thread_select(self, processor, &reason);
4223 		thread_unlock(self);
4224 	}
4225 
4226 	return self->wait_result;
4227 }
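/*
 * Usage sketch (hypothetical; assumes "wakee" is runnable, off any run
 * queue, and already has a chosen_processor, e.g. via thread_go()):
 */
#if 0
	/* at splsched; donates the remainder of the caller's quantum */
	wait_result_t wr = thread_run(self, THREAD_CONTINUE_NULL, NULL, wakee);
#endif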
4228 
4229 /*
4230  *	thread_continue:
4231  *
4232  *	Called at splsched when a thread first receives
4233  *	a new stack after a continuation.
4234  *
4235  *	Called with THREAD_NULL as the old thread when
4236  *	invoked by machine_load_context.
4237  */
4238 void
4239 thread_continue(
4240 	thread_t        thread)
4241 {
4242 	thread_t                self = current_thread();
4243 	thread_continue_t       continuation;
4244 	void                    *parameter;
4245 
4246 	DTRACE_SCHED(on__cpu);
4247 
4248 	continuation = self->continuation;
4249 	parameter = self->parameter;
4250 
4251 	assert(continuation != NULL);
4252 
4253 #if KPERF
4254 	kperf_on_cpu(self, continuation, NULL);
4255 #endif
4256 
4257 
4258 	thread_dispatch(thread, self);
4259 
4260 	self->continuation = self->parameter = NULL;
4261 
4262 #if SCHED_HYGIENE_DEBUG
4263 	/* Reset interrupt-masked spin debugging timeout */
4264 	ml_spin_debug_clear(self);
4265 #endif
4266 
4267 	TLOG(1, "thread_continue: calling call_continuation\n");
4268 
4269 	boolean_t enable_interrupts = TRUE;
4270 
4271 	/* bootstrap thread and idle thread need to stay interrupts-disabled */
4272 	if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
4273 		enable_interrupts = FALSE;
4274 	}
4275 
4276 #if KASAN_TBI
4277 	kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
4278 #endif /* KASAN_TBI */
4279 
4280 
4281 	call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
4282 	/*NOTREACHED*/
4283 }
4284 
4285 void
4286 thread_quantum_init(thread_t thread, uint64_t now)
4287 {
4288 	uint64_t new_quantum = 0;
4289 
4290 	switch (thread->sched_mode) {
4291 	case TH_MODE_REALTIME:
4292 		new_quantum = thread->realtime.computation;
4293 		new_quantum = MIN(new_quantum, max_unsafe_rt_computation);
4294 		break;
4295 
4296 	case TH_MODE_FIXED:
4297 		new_quantum = SCHED(initial_quantum_size)(thread);
4298 		new_quantum = MIN(new_quantum, max_unsafe_fixed_computation);
4299 		break;
4300 
4301 	default:
4302 		new_quantum = SCHED(initial_quantum_size)(thread);
4303 		break;
4304 	}
4305 
4306 	if (cpulimit_affects_quantum) {
4307 		const uint64_t cpulimit_remaining = thread_cpulimit_remaining(now);
4308 
4309 		/*
4310 		 * If there's no remaining CPU time, the ledger system will
4311 		 * notice and put the thread to sleep.
4312 		 */
4313 		if (cpulimit_remaining > 0) {
4314 			new_quantum = MIN(new_quantum, cpulimit_remaining);
4315 		}
4316 	}
4317 
4318 	assert3u(new_quantum, <, UINT32_MAX);
4319 	assert3u(new_quantum, >, 0);
4320 
4321 	thread->quantum_remaining = (uint32_t)new_quantum;
4322 }
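/*
 * Worked example (illustrative numbers): a TH_MODE_REALTIME thread with
 * realtime.computation = 5ms receives a 5ms quantum, clamped by
 * max_unsafe_rt_computation. If cpulimit_affects_quantum is set and only
 * 2ms remain in the CPU limit interval, the quantum is further clamped to
 * 2ms so the limit is enforced at quantum expiry instead of overshooting.
 */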
4323 
4324 uint32_t
4325 sched_timeshare_initial_quantum_size(thread_t thread)
4326 {
4327 	if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
4328 		return bg_quantum;
4329 	} else {
4330 		return std_quantum;
4331 	}
4332 }
4333 
4334 /*
4335  *	run_queue_init:
4336  *
4337  *	Initialize a run queue before first use.
4338  */
4339 void
4340 run_queue_init(
4341 	run_queue_t             rq)
4342 {
4343 	rq->highq = NOPRI;
4344 	for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
4345 		rq->bitmap[i] = 0;
4346 	}
4347 	rq->urgency = rq->count = 0;
4348 	for (int i = 0; i < NRQS; i++) {
4349 		circle_queue_init(&rq->queues[i]);
4350 	}
4351 }
4352 
4353 /*
4354  *	run_queue_dequeue:
4355  *
4356  *	Perform a dequeue operation on a run queue,
4357  *	and return the resulting thread.
4358  *
4359  *	The run queue must be locked (see thread_run_queue_remove()
4360  *	for more info), and not empty.
4361  */
4362 thread_t
4363 run_queue_dequeue(
4364 	run_queue_t     rq,
4365 	sched_options_t options)
4366 {
4367 	thread_t        thread;
4368 	circle_queue_t  queue = &rq->queues[rq->highq];
4369 
4370 	if (options & SCHED_HEADQ) {
4371 		thread = cqe_dequeue_head(queue, struct thread, runq_links);
4372 	} else {
4373 		thread = cqe_dequeue_tail(queue, struct thread, runq_links);
4374 	}
4375 
4376 	assert(thread != THREAD_NULL);
4377 	assert_thread_magic(thread);
4378 
4379 	thread_clear_runq(thread);
4380 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4381 	rq->count--;
4382 	if (SCHED(priority_is_urgent)(rq->highq)) {
4383 		rq->urgency--; assert(rq->urgency >= 0);
4384 	}
4385 	if (circle_queue_empty(queue)) {
4386 		bitmap_clear(rq->bitmap, rq->highq);
4387 		rq->highq = bitmap_first(rq->bitmap, NRQS);
4388 	}
4389 
4390 	return thread;
4391 }
4392 
4393 /*
4394  *	run_queue_enqueue:
4395  *
4396  *	Perform an enqueue operation on a run queue.
4397  *
4398  *	The run queue must be locked (see thread_run_queue_remove()
4399  *	for more info).
4400  */
4401 boolean_t
4402 run_queue_enqueue(
4403 	run_queue_t      rq,
4404 	thread_t         thread,
4405 	sched_options_t  options)
4406 {
4407 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
4408 	boolean_t       result = FALSE;
4409 
4410 	assert_thread_magic(thread);
4411 
4412 	if (circle_queue_empty(queue)) {
4413 		circle_enqueue_tail(queue, &thread->runq_links);
4414 
4415 		rq_bitmap_set(rq->bitmap, thread->sched_pri);
4416 		if (thread->sched_pri > rq->highq) {
4417 			rq->highq = thread->sched_pri;
4418 			result = TRUE;
4419 		}
4420 	} else {
4421 		if (options & SCHED_TAILQ) {
4422 			circle_enqueue_tail(queue, &thread->runq_links);
4423 		} else {
4424 			circle_enqueue_head(queue, &thread->runq_links);
4425 		}
4426 	}
4427 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4428 		rq->urgency++;
4429 	}
4430 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4431 	rq->count++;
4432 
4433 	return result;
4434 }
4435 
4436 /*
4437  *	run_queue_remove:
4438  *
4439  *	Remove a specific thread from a runqueue.
4440  *
4441  *	The run queue must be locked.
4442  */
4443 void
4444 run_queue_remove(
4445 	run_queue_t    rq,
4446 	thread_t       thread)
4447 {
4448 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
4449 
4450 	thread_assert_runq_nonnull(thread);
4451 	assert_thread_magic(thread);
4452 
4453 	circle_dequeue(queue, &thread->runq_links);
4454 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4455 	rq->count--;
4456 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4457 		rq->urgency--; assert(rq->urgency >= 0);
4458 	}
4459 
4460 	if (circle_queue_empty(queue)) {
4461 		/* update run queue status */
4462 		bitmap_clear(rq->bitmap, thread->sched_pri);
4463 		rq->highq = bitmap_first(rq->bitmap, NRQS);
4464 	}
4465 
4466 	thread_clear_runq(thread);
4467 }
4468 
4469 /*
4470  *      run_queue_peek
4471  *
4472  *      Peek at the runq and return the highest
4473  *      priority thread from the runq.
4474  *
4475  *	The run queue must be locked.
4476  */
4477 thread_t
4478 run_queue_peek(
4479 	run_queue_t    rq)
4480 {
4481 	if (rq->count > 0) {
4482 		circle_queue_t queue = &rq->queues[rq->highq];
4483 		thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
4484 		assert_thread_magic(thread);
4485 		return thread;
4486 	} else {
4487 		return THREAD_NULL;
4488 	}
4489 }
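/*
 * Structure note (summarizing the routines above): a run queue keeps one
 * circular queue per priority plus a bitmap of occupied priorities, so the
 * highest runnable priority is found with a single
 * bitmap_first(rq->bitmap, NRQS) scan and cached in rq->highq. For example,
 * with threads queued at priorities 31 and 80, bitmap_first() returns 80;
 * once priority 80 drains, its bit is cleared and rq->highq falls back
 * to 31.
 */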
4490 
4491 static bool
4492 rt_runq_enqueue(rt_queue_t rt_run_queue, thread_t thread, processor_t processor)
4493 {
4494 	int pri = thread->sched_pri;
4495 	assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4496 	int i = pri - BASEPRI_RTQUEUES;
4497 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4498 	bitmap_t *map = rt_run_queue->bitmap;
4499 
4500 	bitmap_set(map, i);
4501 
4502 	queue_t     queue       = &rt_runq->pri_queue;
4503 	uint64_t    deadline    = thread->realtime.deadline;
4504 	bool        preempt     = false;
4505 	bool        earliest    = false;
4506 
4507 	if (queue_empty(queue)) {
4508 		enqueue_tail(queue, &thread->runq_links);
4509 		preempt = true;
4510 		earliest = true;
4511 		rt_runq->pri_earliest_deadline = deadline;
4512 		rt_runq->pri_constraint = thread->realtime.constraint;
4513 	} else {
4514 		/* Insert into rt_runq in thread deadline order */
4515 		queue_entry_t iter;
4516 		qe_foreach(iter, queue) {
4517 			thread_t iter_thread = qe_element(iter, struct thread, runq_links);
4518 			assert_thread_magic(iter_thread);
4519 
4520 			if (deadline < iter_thread->realtime.deadline) {
4521 				if (iter == queue_first(queue)) {
4522 					preempt = true;
4523 					earliest = true;
4524 					rt_runq->pri_earliest_deadline = deadline;
4525 					rt_runq->pri_constraint = thread->realtime.constraint;
4526 				}
4527 				insque(&thread->runq_links, queue_prev(iter));
4528 				break;
4529 			} else if (iter == queue_last(queue)) {
4530 				enqueue_tail(queue, &thread->runq_links);
4531 				break;
4532 			}
4533 		}
4534 	}
4535 	if (earliest && (deadline < os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed))) {
4536 		os_atomic_store_wide(&rt_run_queue->earliest_deadline, deadline, relaxed);
4537 		os_atomic_store(&rt_run_queue->constraint, thread->realtime.constraint, relaxed);
4538 		os_atomic_store(&rt_run_queue->ed_index, pri - BASEPRI_RTQUEUES, relaxed);
4539 	}
4540 
4541 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4542 	rt_runq->pri_count++;
4543 	os_atomic_inc(&rt_run_queue->count, relaxed);
4544 
4545 	thread_set_runq_locked(thread, processor);
4546 
4547 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4548 
4549 	return preempt;
4550 }
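/*
 * Worked example (illustrative deadlines): within one priority level the
 * queue is kept in earliest-deadline-first order. Enqueueing a thread with
 * deadline 90 ahead of queued deadlines [100, 120] yields [90, 100, 120],
 * updates pri_earliest_deadline to 90, and returns preempt == true because
 * the new thread landed at the head.
 */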
4551 
4552 static thread_t
4553 rt_runq_dequeue(rt_queue_t rt_run_queue)
4554 {
4555 	bitmap_t *map = rt_run_queue->bitmap;
4556 	int i = bitmap_first(map, NRTQS);
4557 	assert((i >= 0) && (i < NRTQS));
4558 
4559 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4560 
4561 	if (!sched_rt_runq_strict_priority) {
4562 		int ed_index = os_atomic_load(&rt_run_queue->ed_index, relaxed);
4563 		if (ed_index != i) {
4564 			assert((ed_index >= 0) && (ed_index < NRTQS));
4565 			rt_queue_pri_t *ed_runq = &rt_run_queue->rt_queue_pri[ed_index];
4566 
4567 			thread_t ed_thread = qe_queue_first(&ed_runq->pri_queue, struct thread, runq_links);
4568 			thread_t hi_thread = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4569 
4570 			if (ed_thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon < hi_thread->realtime.constraint) {
4571 				/* choose the earliest deadline thread */
4572 				rt_runq = ed_runq;
4573 				i = ed_index;
4574 			}
4575 		}
4576 	}
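	/*
	 * Worked example (illustrative numbers): with strict priority off,
	 * suppose the highest-priority thread has computation 2ms and
	 * constraint 10ms, while a lower-priority thread holds the earliest
	 * deadline with computation 3ms. Since 3ms + 2ms + rt_deadline_epsilon
	 * is under 10ms, the high-priority thread can still meet its
	 * constraint after the earliest-deadline thread runs, so the latter
	 * is dequeued first.
	 */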
4577 
4578 	assert(rt_runq->pri_count > 0);
4579 	uint64_t earliest_deadline = RT_DEADLINE_NONE;
4580 	uint32_t constraint = RT_CONSTRAINT_NONE;
4581 	int ed_index = NOPRI;
4582 	thread_t new_thread = qe_dequeue_head(&rt_runq->pri_queue, struct thread, runq_links);
4583 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4584 	if (--rt_runq->pri_count > 0) {
4585 		thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4586 		assert(next_rt != THREAD_NULL);
4587 		earliest_deadline = next_rt->realtime.deadline;
4588 		constraint = next_rt->realtime.constraint;
4589 		ed_index = i;
4590 	} else {
4591 		bitmap_clear(map, i);
4592 	}
4593 	rt_runq->pri_earliest_deadline = earliest_deadline;
4594 	rt_runq->pri_constraint = constraint;
4595 
4596 	for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4597 		rt_runq = &rt_run_queue->rt_queue_pri[i];
4598 		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4599 			earliest_deadline = rt_runq->pri_earliest_deadline;
4600 			constraint = rt_runq->pri_constraint;
4601 			ed_index = i;
4602 		}
4603 	}
4604 	os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4605 	os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4606 	os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4607 	os_atomic_dec(&rt_run_queue->count, relaxed);
4608 
4609 	thread_clear_runq(new_thread);
4610 
4611 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4612 
4613 	return new_thread;
4614 }
4615 
4616 static thread_t
4617 rt_runq_first(rt_queue_t rt_run_queue)
4618 {
4619 	bitmap_t *map = rt_run_queue->bitmap;
4620 	int i = bitmap_first(map, NRTQS);
4621 	if (i < 0) {
4622 		return THREAD_NULL;
4623 	}
4624 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4625 	thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4626 
4627 	return next_rt;
4628 }
4629 
4630 static void
4631 rt_runq_remove(rt_queue_t rt_run_queue, thread_t thread)
4632 {
4633 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4634 
4635 	int pri = thread->sched_pri;
4636 	assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4637 	int i = pri - BASEPRI_RTQUEUES;
4638 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4639 	bitmap_t *map = rt_run_queue->bitmap;
4640 
4641 	assert(rt_runq->pri_count > 0);
4642 	uint64_t earliest_deadline = RT_DEADLINE_NONE;
4643 	uint32_t constraint = RT_CONSTRAINT_NONE;
4644 	int ed_index = NOPRI;
4645 	remqueue(&thread->runq_links);
4646 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4647 	if (--rt_runq->pri_count > 0) {
4648 		thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4649 		earliest_deadline = next_rt->realtime.deadline;
4650 		constraint = next_rt->realtime.constraint;
4651 		ed_index = i;
4652 	} else {
4653 		bitmap_clear(map, i);
4654 	}
4655 	rt_runq->pri_earliest_deadline = earliest_deadline;
4656 	rt_runq->pri_constraint = constraint;
4657 
4658 	for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4659 		rt_runq = &rt_run_queue->rt_queue_pri[i];
4660 		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4661 			earliest_deadline = rt_runq->pri_earliest_deadline;
4662 			constraint = rt_runq->pri_constraint;
4663 			ed_index = i;
4664 		}
4665 	}
4666 	os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4667 	os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4668 	os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4669 	os_atomic_dec(&rt_run_queue->count, relaxed);
4670 
4671 	thread_clear_runq_locked(thread);
4672 
4673 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4674 }
4675 
4676 rt_queue_t
4677 sched_rtlocal_runq(processor_set_t pset)
4678 {
4679 	return &pset->rt_runq;
4680 }
4681 
4682 void
4683 sched_rtlocal_init(processor_set_t pset)
4684 {
4685 	pset_rt_init(pset);
4686 }
4687 
4688 void
4689 sched_rtlocal_queue_shutdown(processor_t processor)
4690 {
4691 	processor_set_t pset = processor->processor_set;
4692 	thread_t        thread;
4693 	queue_head_t    tqueue;
4694 
4695 	pset_lock(pset);
4696 
4697 	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
4698 	if (bit_count(pset_available_cpumap(pset)) > 0) {
4699 		pset_unlock(pset);
4700 		return;
4701 	}
4702 
4703 	queue_init(&tqueue);
4704 
4705 	while (rt_runq_count(pset) > 0) {
4706 		thread = rt_runq_dequeue(&pset->rt_runq);
4707 		enqueue_tail(&tqueue, &thread->runq_links);
4708 	}
4709 	sched_update_pset_load_average(pset, 0);
4710 	pset_update_rt_stealable_state(pset);
4711 	pset_unlock(pset);
4712 
4713 	qe_foreach_element_safe(thread, &tqueue, runq_links) {
4714 		remqueue(&thread->runq_links);
4715 
4716 		thread_lock(thread);
4717 
4718 		thread_setrun(thread, SCHED_TAILQ);
4719 
4720 		thread_unlock(thread);
4721 	}
4722 }
4723 
4724 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
4725 void
4726 sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
4727 {
4728 	thread_t        thread;
4729 
4730 	pset_node_t node = &pset_node0;
4731 	processor_set_t pset = node->psets;
4732 
4733 	spl_t s = splsched();
4734 	do {
4735 		while (pset != NULL) {
4736 			pset_lock(pset);
4737 
4738 			bitmap_t *map = pset->rt_runq.bitmap;
4739 			for (int i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4740 				rt_queue_pri_t *rt_runq = &pset->rt_runq.rt_queue_pri[i];
4741 
4742 				qe_foreach_element_safe(thread, &rt_runq->pri_queue, runq_links) {
4743 					if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
4744 						scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
4745 					}
4746 				}
4747 			}
4748 
4749 			pset_unlock(pset);
4750 
4751 			pset = pset->pset_list;
4752 		}
4753 	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4754 	splx(s);
4755 }
4756 
4757 int64_t
4758 sched_rtlocal_runq_count_sum(void)
4759 {
4760 	pset_node_t node = &pset_node0;
4761 	processor_set_t pset = node->psets;
4762 	int64_t count = 0;
4763 
4764 	do {
4765 		while (pset != NULL) {
4766 			count += pset->rt_runq.runq_stats.count_sum;
4767 
4768 			pset = pset->pset_list;
4769 		}
4770 	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4771 
4772 	return count;
4773 }
4774 
4775 /*
4776  * Called with stealing_pset locked and
4777  * returns with stealing_pset locked
4778  * but the lock will have been dropped
4779  * if a thread is returned.
4780  */
4781 thread_t
4782 sched_rtlocal_steal_thread(processor_set_t stealing_pset, uint64_t earliest_deadline)
4783 {
4784 	if (!sched_allow_rt_steal) {
4785 		return THREAD_NULL;
4786 	}
4787 	pset_map_t pset_map = stealing_pset->node->pset_map;
4788 
4789 	bit_clear(pset_map, stealing_pset->pset_id);
4790 
4791 	processor_set_t pset = stealing_pset;
4792 
4793 	processor_set_t target_pset;
4794 	uint64_t target_deadline;
4795 
4796 retry:
4797 	target_pset = NULL;
4798 	target_deadline = earliest_deadline - rt_deadline_epsilon;
4799 
4800 	for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
4801 		processor_set_t nset = pset_array[pset_id];
4802 
4803 		/*
4804 		 * During startup, while pset_array[] and node->pset_map are still being initialized,
4805 		 * the update to pset_map may become visible to this cpu before the update to pset_array[].
4806 		 * It would be good to avoid inserting a memory barrier here that is only needed during startup,
4807 		 * so just check nset is not NULL instead.
4808 		 */
4809 		if (nset && (nset->stealable_rt_threads_earliest_deadline < target_deadline)) {
4810 			target_deadline = nset->stealable_rt_threads_earliest_deadline;
4811 			target_pset = nset;
4812 		}
4813 	}
4814 
4815 	if (target_pset != NULL) {
4816 		pset = change_locked_pset(pset, target_pset);
4817 		if (pset->stealable_rt_threads_earliest_deadline <= target_deadline) {
4818 			thread_t new_thread = rt_runq_dequeue(&pset->rt_runq);
4819 			pset_update_rt_stealable_state(pset);
4820 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_STEAL) | DBG_FUNC_NONE, (uintptr_t)thread_tid(new_thread), pset->pset_id, pset->cpu_set_low, 0);
4821 
4822 			pset = change_locked_pset(pset, stealing_pset);
4823 			return new_thread;
4824 		}
4825 		pset = change_locked_pset(pset, stealing_pset);
4826 		earliest_deadline = rt_runq_earliest_deadline(pset);
4827 		goto retry;
4828 	}
4829 
4830 	pset = change_locked_pset(pset, stealing_pset);
4831 	return THREAD_NULL;
4832 }
4833 
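/*
 * Illustrative caller pattern for the locking contract above (a sketch,
 * not part of this file): because the pset lock is dropped and retaken
 * whenever a thread is actually stolen, any pset state cached across the
 * call must be revalidated afterwards.
 *
 *	pset_lock(pset);
 *	thread_t stolen = sched_rtlocal_steal_thread(pset,
 *	    rt_runq_earliest_deadline(pset));
 *	// pset is locked again here either way; if stolen != THREAD_NULL,
 *	// the lock was released in between, so previously read fields
 *	// (runq counts, deadlines) may be stale and must be re-read.
 */
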
/*
 * pset is locked
 */
thread_t
sched_rt_choose_thread(processor_set_t pset)
{
	processor_t processor = current_processor();

	if (SCHED(steal_thread_enabled)(pset)) {
		do {
			bool spill_pending = bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
			if (spill_pending) {
				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 2);
			}
			thread_t new_thread = SCHED(rt_steal_thread)(pset, rt_runq_earliest_deadline(pset));
			if (new_thread != THREAD_NULL) {
				if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 3);
				}
				return new_thread;
			}
		} while (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id));
	}

	if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 4);
	}

	if (rt_runq_count(pset) > 0) {
		thread_t new_thread = rt_runq_dequeue(SCHED(rt_runq)(pset));
		assert(new_thread != THREAD_NULL);
		pset_update_rt_stealable_state(pset);
		return new_thread;
	}

	return THREAD_NULL;
}

/*
 *	realtime_queue_insert:
 *
 *	Enqueue a thread for realtime execution.
 */
static bool
realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
{
	pset_assert_locked(pset);

	bool preempt = rt_runq_enqueue(SCHED(rt_runq)(pset), thread, processor);
	pset_update_rt_stealable_state(pset);

	return preempt;
}

/*
 *	realtime_setrun:
 *
 *	Dispatch a thread for realtime execution.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
realtime_setrun(
	processor_t                     chosen_processor,
	thread_t                        thread)
{
	processor_set_t pset = chosen_processor->processor_set;
	pset_assert_locked(pset);
	bool pset_is_locked = true;

	int n_backup = 0;

	if (thread->realtime.constraint <= rt_constraint_threshold) {
		n_backup = sched_rt_n_backup_processors;
	}
	assert((n_backup >= 0) && (n_backup <= SCHED_MAX_BACKUP_PROCESSORS));

	int existing_backups = bit_count(pset->pending_AST_URGENT_cpu_mask) - rt_runq_count(pset);
	if (existing_backups > 0) {
		n_backup = n_backup - existing_backups;
		if (n_backup < 0) {
			n_backup = 0;
		}
	}

	sched_ipi_type_t ipi_type[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
	processor_t ipi_processor[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};

	thread->chosen_processor = chosen_processor;

	/* <rdar://problem/15102234> */
	assert(thread->bound_processor == PROCESSOR_NULL);

	realtime_queue_insert(chosen_processor, pset, thread);

	processor_t processor = chosen_processor;

	int count = 0;
	for (int i = 0; i <= n_backup; i++) {
		if (i == 0) {
			ipi_type[i] = SCHED_IPI_NONE;
			ipi_processor[i] = processor;
			count++;

			ast_t preempt = AST_NONE;
			if (thread->sched_pri > processor->current_pri) {
				preempt = (AST_PREEMPT | AST_URGENT);
			} else if (thread->sched_pri == processor->current_pri) {
				if (deadline_add(thread->realtime.deadline, rt_deadline_epsilon) < processor->deadline) {
					preempt = (AST_PREEMPT | AST_URGENT);
				}
			}

			if (preempt != AST_NONE) {
				if (processor->state == PROCESSOR_IDLE) {
					if (processor == current_processor()) {
						pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
						ast_on(preempt);

						if ((preempt & AST_URGENT) == AST_URGENT) {
							if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
								KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
								    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 1);
							}
						}

						if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
							bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
						}
					} else {
						ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
					}
				} else if (processor->state == PROCESSOR_DISPATCHING) {
					if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
						KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
						    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 2);
					}
				} else {
					if (processor == current_processor()) {
						ast_on(preempt);

						if ((preempt & AST_URGENT) == AST_URGENT) {
							if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
								KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
								    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 3);
							}
						}

						if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
							bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
						}
					} else {
						ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
					}
				}
			} else {
				/* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
			}
		} else {
			if (!pset_is_locked) {
				pset_lock(pset);
			}
			ipi_type[i] = SCHED_IPI_NONE;
			ipi_processor[i] = PROCESSOR_NULL;
			pset_is_locked = !choose_next_rt_processor_for_IPI(pset, chosen_processor, false, &ipi_processor[i], &ipi_type[i]);
			if (ipi_processor[i] == PROCESSOR_NULL) {
				break;
			}
			count++;

			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
			    ipi_processor[i]->cpu_id, ipi_processor[i]->state, backup, 1);
#if CONFIG_SCHED_SMT
#define p_is_good(p) (((p)->processor_primary == (p)) && ((sched_avoid_cpu0 != 1) || ((p)->cpu_id != 0)))
			if (n_backup == SCHED_DEFAULT_BACKUP_PROCESSORS_SMT) {
				processor_t p0 = ipi_processor[0];
				processor_t p1 = ipi_processor[1];
				assert(p0 && p1);
				if (p_is_good(p0) && p_is_good(p1)) {
					/*
					 * Both the chosen processor and the first backup are non-cpu0 primaries,
					 * so there is no need for a 2nd backup processor.
					 */
					break;
				}
			}
#endif /* CONFIG_SCHED_SMT */
		}
	}

	if (pset_is_locked) {
		pset_unlock(pset);
	}

	assert((count > 0) && (count <= (n_backup + 1)));
	for (int i = 0; i < count; i++) {
		assert(ipi_processor[i] != PROCESSOR_NULL);
		sched_ipi_perform(ipi_processor[i], ipi_type[i]);
	}
}


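/*
 * Worked example of the backup accounting above (illustrative): with
 * sched_rt_n_backup_processors == 2, three CPUs in the pset already
 * carrying pending urgent ASTs, and two threads in the RT runq,
 * existing_backups == 3 - 2 == 1, so n_backup is trimmed from 2 to 1
 * and only one backup processor is IPI'd alongside the chosen one.
 */
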
sched_ipi_type_t
sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
    thread_t thread, __unused sched_ipi_event_t event)
{
#if defined(CONFIG_SCHED_DEFERRED_AST)
#if CONFIG_THREAD_GROUPS
	if (thread) {
		struct thread_group *tg = thread_group_get(thread);
		if (thread_group_uses_immediate_ipi(tg)) {
			return SCHED_IPI_IMMEDIATE;
		}
	}
#endif /* CONFIG_THREAD_GROUPS */
	if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
		return SCHED_IPI_DEFERRED;
	}
#else /* CONFIG_SCHED_DEFERRED_AST */
	(void) thread;
	panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
#endif /* CONFIG_SCHED_DEFERRED_AST */
	return SCHED_IPI_NONE;
}

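/*
 * Example of the deferral gate above (illustrative): two back-to-back
 * requests against the same idle CPU on a CONFIG_SCHED_DEFERRED_AST
 * kernel behave differently. The first returns SCHED_IPI_DEFERRED (and
 * sched_ipi_action() then sets the CPU's bit in
 * pending_deferred_AST_cpu_mask); the second finds the bit already set
 * and returns SCHED_IPI_NONE, so at most one deferred wakeup is kept
 * outstanding per CPU. Thread groups that demand immediate IPIs bypass
 * the deferral entirely.
 */
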
sched_ipi_type_t
sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
{
	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
	assert(dst != NULL);

	processor_set_t pset = dst->processor_set;
	if (current_processor() == dst) {
		return SCHED_IPI_NONE;
	}

	bool dst_idle = (dst->state == PROCESSOR_IDLE);
	if (dst_idle) {
		pset_update_processor_state(pset, dst, PROCESSOR_DISPATCHING);
	}

	ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
	switch (ipi_type) {
	case SCHED_IPI_NONE:
		return SCHED_IPI_NONE;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	case SCHED_IPI_DEFERRED:
		bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
		break;
#endif /* CONFIG_SCHED_DEFERRED_AST */
	default:
		if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
			    dst->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 4);
		}
		bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
		break;
	}
	return ipi_type;
}

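/*
 * Illustrative two-phase usage (a sketch mirroring processor_setrun()
 * below; dst and thread stand for a destination processor and the thread
 * being dispatched): the IPI decision is made while the pset lock is
 * held, but the interrupt itself is only sent after the lock is dropped,
 * keeping the signalling out of the lock hold time.
 *
 *	pset_lock(pset);
 *	sched_ipi_type_t ipi =
 *	    sched_ipi_action(dst, thread, SCHED_IPI_EVENT_PREEMPT);
 *	...  (enqueue the thread, update pset state)
 *	pset_unlock(pset);
 *	sched_ipi_perform(dst, ipi);
 */
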
sched_ipi_type_t
sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
{
	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
	boolean_t deferred_ipi_supported = false;
	processor_set_t pset = dst->processor_set;

#if defined(CONFIG_SCHED_DEFERRED_AST)
	deferred_ipi_supported = true;
#endif /* CONFIG_SCHED_DEFERRED_AST */

	switch (event) {
	case SCHED_IPI_EVENT_SPILL:
	case SCHED_IPI_EVENT_SMT_REBAL:
	case SCHED_IPI_EVENT_REBALANCE:
	case SCHED_IPI_EVENT_BOUND_THR:
	case SCHED_IPI_EVENT_RT_PREEMPT:
		/*
		 * The RT preempt, spill, SMT rebalance, rebalance and bound thread
		 * scenarios always use immediate IPIs.
		 */
		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
		break;
	case SCHED_IPI_EVENT_PREEMPT:
		/* In the preemption case, use immediate IPIs for RT threads */
		if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
			ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
			break;
		}

		/*
		 * For non-RT thread preemption:
		 * if the core is active, use an immediate IPI;
		 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
		 */
		if (deferred_ipi_supported && dst_idle) {
			return sched_ipi_deferred_policy(pset, dst, thread, event);
		}
		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
		break;
	default:
		panic("Unrecognized scheduler IPI event type %d", event);
	}
	assert(ipi_type != SCHED_IPI_NONE);
	return ipi_type;
}

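/*
 * Example outcomes of the policy above (illustrative): an RT preempt
 * aimed at an idle CPU yields SCHED_IPI_IDLE and at a running CPU yields
 * SCHED_IPI_IMMEDIATE; a non-RT preempt aimed at an idle CPU on a
 * CONFIG_SCHED_DEFERRED_AST kernel is routed through
 * sched_ipi_deferred_policy() and typically yields SCHED_IPI_DEFERRED.
 */
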
void
sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
{
	switch (ipi) {
	case SCHED_IPI_NONE:
		break;
	case SCHED_IPI_IDLE:
		machine_signal_idle(dst);
		break;
	case SCHED_IPI_IMMEDIATE:
		cause_ast_check(dst);
		break;
	case SCHED_IPI_DEFERRED:
		machine_signal_idle_deferred(dst);
		break;
	default:
		panic("Unrecognized scheduler IPI type: %d", ipi);
	}
}

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

boolean_t
priority_is_urgent(int priority)
{
	return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

/*
 *	processor_setrun:
 *
 *	Dispatch a thread for execution on a
 *	processor.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
processor_setrun(
	processor_t                     processor,
	thread_t                        thread,
	integer_t                       options)
{
	processor_set_t pset = processor->processor_set;
	pset_assert_locked(pset);
	ast_t preempt = AST_NONE;
	enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;

	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;

	thread->chosen_processor = processor;

	/*
	 *	Set preemption mode.
	 */
#if defined(CONFIG_SCHED_DEFERRED_AST)
	/* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
#endif
	if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
		preempt = (AST_PREEMPT | AST_URGENT);
	} else if (processor->current_is_eagerpreempt) {
		preempt = (AST_PREEMPT | AST_URGENT);
	} else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
		if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
			preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
		} else {
			preempt = AST_NONE;
		}
	} else {
		preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
	}

	if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
		/*
		 * Having gone to the trouble of forcing this thread off a less preferred core,
		 * we should force the preferable core to reschedule immediately to give this
		 * thread a chance to run instead of just sitting on the run queue where
		 * it may just be stolen back by the idle core we just forced it off.
		 */
		preempt |= AST_PREEMPT;
	}

	SCHED(processor_enqueue)(processor, thread, options);
	sched_update_pset_load_average(pset, 0);

	if (preempt != AST_NONE) {
		if (processor->state == PROCESSOR_IDLE) {
			ipi_action = eExitIdle;
		} else if (processor->state == PROCESSOR_DISPATCHING) {
			if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
				    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 5);
			}
		} else if (processor->state == PROCESSOR_RUNNING &&
		    (thread->sched_pri >= processor->current_pri)) {
			ipi_action = eInterruptRunning;
		}
	} else {
		/*
		 * New thread is not important enough to preempt what is running, but
		 * special processor states may need special handling
		 */
		if (processor->state == PROCESSOR_IDLE) {
			ipi_action = eExitIdle;
		} else if (processor->state == PROCESSOR_DISPATCHING) {
			if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
				    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 6);
			}
		}
	}

	if (ipi_action != eDoNothing) {
		if (processor == current_processor()) {
			if (ipi_action == eExitIdle) {
				pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
			}
			if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
				ast_on(preempt);
			}

			if ((preempt & AST_URGENT) == AST_URGENT) {
				if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
					    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 7);
				}
			} else {
				if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 7);
				}
			}

			if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
				bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
			} else {
				bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
			}
		} else {
			sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
			ipi_type = sched_ipi_action(processor, thread, event);
		}
	}

	pset_unlock(pset);
	sched_ipi_perform(processor, ipi_type);

	if (ipi_action != eDoNothing && processor == current_processor()) {
		ast_t new_preempt = update_pending_nonurgent_preemption(processor, preempt);
		ast_on(new_preempt);
	}
}

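/*
 * Example of the preemption-mode selection above (illustrative): an
 * urgent-priority thread landing on a CPU running lower-priority work
 * gets (AST_PREEMPT | AST_URGENT); a timeshare thread running below its
 * base priority, where that base priority is not urgent, gets AST_NONE
 * and simply waits on the run queue; everything else preempts only when
 * the caller passed SCHED_PREEMPT in options.
 */
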
/*
 *	choose_next_pset:
 *
 *	Return the next sibling pset containing
 *	available processors.
 *
 *	Returns the original pset if none other is
 *	suitable.
 */
static processor_set_t
choose_next_pset(
	processor_set_t         pset)
{
	processor_set_t         nset = pset;

	do {
		nset = next_pset(nset);

		/*
		 * Sometimes during startup the pset_map can contain a bit
		 * for a pset that isn't fully published in pset_array because
		 * the pset_map read isn't an acquire load.
		 *
		 * In order to avoid needing an acquire barrier here, just bail
		 * out.
		 */
		if (nset == PROCESSOR_SET_NULL) {
			return pset;
		}
	} while (nset->online_processor_count < 1 && nset != pset);

	return nset;
}

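/*
 * Illustrative walk (assuming a node with two psets): starting from
 * pset0, choose_next_pset(pset0) returns pset1 when pset1 has at least
 * one online processor; if pset1 is fully offline the loop wraps around
 * and returns pset0 itself. During early boot, a NULL from next_pset()
 * (a pset_map bit published before its pset_array[] entry) likewise
 * falls back to the original pset rather than paying for an acquire
 * barrier.
 */
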
#if CONFIG_SCHED_SMT
/*
 *	choose_processor_smt:
 *
 *	SMT-aware implementation of choose_processor.
 */
processor_t
choose_processor_smt(
	processor_set_t         starting_pset,
	processor_t             processor,
	thread_t                thread)
{
	processor_set_t pset = starting_pset;
	processor_set_t nset;

	assert(thread->sched_pri <= MAXPRI);

	/*
	 * Prefer the hinted processor, when appropriate.
	 */

	/* Fold last processor hint from secondary processor to its primary */
	if (processor != PROCESSOR_NULL) {
		processor = processor->processor_primary;
	}

	/*
	 * Only consult platform layer if pset is active, which
	 * it may not be in some cases when a multi-set system
	 * is going to sleep.
	 */
	if (pset->online_processor_count) {
		if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
			processor_t mc_processor = machine_choose_processor(pset, processor);
			if (mc_processor != PROCESSOR_NULL) {
				processor = mc_processor->processor_primary;
			}
		}
	}

	/*
	 * At this point, we may have a processor hint, and we may have
	 * an initial starting pset. If the hint is not in the pset, or
	 * if the hint is for a processor in an invalid state, discard
	 * the hint.
	 */
	if (processor != PROCESSOR_NULL) {
		if (processor->processor_set != pset) {
			processor = PROCESSOR_NULL;
		} else if (!processor->is_recommended) {
			processor = PROCESSOR_NULL;
		} else {
			switch (processor->state) {
			case PROCESSOR_START:
			case PROCESSOR_PENDING_OFFLINE:
			case PROCESSOR_OFF_LINE:
				/*
				 * Hint is for a processor that cannot support running new threads.
				 */
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_IDLE:
				/*
				 * Hint is for an idle processor. Assume it is no worse than any other
				 * idle processor. The platform layer had an opportunity to provide
				 * the "least cost idle" processor above.
				 */
				if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
					uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
					uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
					/*
					 * If the rotation bitmask to force a migration is set for this core and there's an idle core
					 * that needn't be avoided, don't continue running on the same core.
					 */
					if (!(bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id) && non_avoided_idle_primary_map != 0)) {
						return processor;
					}
				}
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_RUNNING:
			case PROCESSOR_DISPATCHING:
				/*
				 * Hint is for an active CPU. This fast-path allows
				 * realtime threads to preempt non-realtime threads
				 * to regain their previous executing processor.
				 */
				if (thread->sched_pri >= BASEPRI_RTQUEUES) {
					if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
						return processor;
					}
					processor = PROCESSOR_NULL;
				}

				/* Otherwise, use hint as part of search below */
				break;
			default:
				processor = PROCESSOR_NULL;
				break;
			}
		}
	}

	/*
	 * Iterate through the processor sets to locate
	 * an appropriate processor. Seed results with
	 * a last-processor hint, if available, so that
	 * a search must find something strictly better
	 * to replace it.
	 *
	 * A primary/secondary pair of SMT processors are
	 * "unpaired" if the primary is busy but its
	 * corresponding secondary is idle (so the physical
	 * core has full use of its resources).
	 */

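	/*
	 * A sketch of the "unpaired" notion as a predicate (illustrative only,
	 * not part of this file; built from the processor fields this function
	 * already uses):
	 *
	 *	static inline bool
	 *	smt_secondary_is_unpaired(processor_t secondary)
	 *	{
	 *		processor_t primary = secondary->processor_primary;
	 *		return (secondary->state == PROCESSOR_IDLE) &&
	 *		    (primary != secondary) &&
	 *		    (primary->state == PROCESSOR_RUNNING ||
	 *		    primary->state == PROCESSOR_DISPATCHING);
	 *	}
	 */
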
	integer_t lowest_priority = MAXPRI + 1;
	integer_t lowest_secondary_priority = MAXPRI + 1;
	integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
	integer_t lowest_idle_secondary_priority = MAXPRI + 1;
	integer_t lowest_count = INT_MAX;
	processor_t lp_processor = PROCESSOR_NULL;
	processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
	processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
	processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
	processor_t lc_processor = PROCESSOR_NULL;

	if (processor != PROCESSOR_NULL) {
		/* All other states should be enumerated above. */
		assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
		assert(thread->sched_pri < BASEPRI_RTQUEUES);

		lowest_priority = processor->current_pri;
		lp_processor = processor;

		lowest_count = SCHED(processor_runq_count)(processor);
		lc_processor = processor;
	}

	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		pset_node_t node = pset->node;
		bool include_ast_urgent_pending_cpus = false;
		cpumap_t ast_urgent_pending;
try_again:
		ast_urgent_pending = 0;
		int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0) || include_ast_urgent_pending_cpus;
		for (; consider_secondaries < 2; consider_secondaries++) {
			pset = change_locked_pset(pset, starting_pset);
			do {
				cpumap_t available_map = pset_available_cpumap(pset);
				if (available_map == 0) {
					goto no_available_cpus;
				}

				processor = choose_processor_for_realtime_thread_smt(pset, PROCESSOR_NULL, consider_secondaries, false);
				if (processor) {
					return processor;
				}

				if (consider_secondaries) {
					processor = choose_furthest_deadline_processor_for_realtime_thread(pset, thread->sched_pri, thread->realtime.deadline, PROCESSOR_NULL, false, include_ast_urgent_pending_cpus);
					if (processor) {
						/*
						 * Instead of looping through all the psets to find the global
						 * furthest deadline processor, preempt the first candidate found.
						 * The preempted thread will then find any other available far deadline
						 * processors to preempt.
						 */
						return processor;
					}

					ast_urgent_pending |= pset->pending_AST_URGENT_cpu_mask;

					if (rt_runq_count(pset) < lowest_count) {
						int cpuid = bit_first(available_map);
						assert(cpuid >= 0);
						lc_processor = processor_array[cpuid];
						lowest_count = rt_runq_count(pset);
					}
				}

no_available_cpus:
				nset = next_pset(pset);

				if (nset != starting_pset) {
					pset = change_locked_pset(pset, nset);
				}
			} while (nset != starting_pset);
		}

		/* Short cut for single pset nodes */
		if (bit_count(node->pset_map) == 1) {
			if (lc_processor) {
				pset_assert_locked(lc_processor->processor_set);
				return lc_processor;
			}
		} else {
			if (ast_urgent_pending && !include_ast_urgent_pending_cpus) {
				/* See the comment in choose_furthest_deadline_processor_for_realtime_thread() */
				include_ast_urgent_pending_cpus = true;
				goto try_again;
			}
		}

		processor = lc_processor;

		if (processor) {
			pset = change_locked_pset(pset, processor->processor_set);
			/* Check that chosen processor is still usable */
			cpumap_t available_map = pset_available_cpumap(pset);
			if (bit_test(available_map, processor->cpu_id)) {
				return processor;
			}

			/* processor is no longer usable */
			processor = PROCESSOR_NULL;
		}

		pset_assert_locked(pset);
		pset_unlock(pset);
		return PROCESSOR_NULL;
	}

	/* No realtime threads from this point on */
	assert(thread->sched_pri < BASEPRI_RTQUEUES);

	do {
		/*
		 * Choose an idle processor, in pset traversal order
		 */
		uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
		uint64_t preferred_idle_primary_map = idle_primary_map & pset->perfcontrol_cpu_preferred_bitmask;

		/* there shouldn't be a pending AST if the processor is idle */
		assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);

		/*
		 * Look at the preferred cores first.
		 */
		int cpuid = lsb_next(preferred_idle_primary_map, pset->cpu_preferred_last_chosen);
		if (cpuid < 0) {
			cpuid = lsb_first(preferred_idle_primary_map);
		}
		if (cpuid >= 0) {
			processor = processor_array[cpuid];
			pset->cpu_preferred_last_chosen = cpuid;
			return processor;
		}

		/*
		 * Look at the cores that don't need to be avoided next.
		 */
		if (pset->perfcontrol_cpu_migration_bitmask != 0) {
			uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
			cpuid = lsb_next(non_avoided_idle_primary_map, pset->cpu_preferred_last_chosen);
			if (cpuid < 0) {
				cpuid = lsb_first(non_avoided_idle_primary_map);
			}
			if (cpuid >= 0) {
				processor = processor_array[cpuid];
				pset->cpu_preferred_last_chosen = cpuid;
				return processor;
			}
		}

		/*
		 * Fall back to any remaining idle cores if none of the preferred ones and non-avoided ones are available.
		 */
		cpuid = lsb_first(idle_primary_map);
		if (cpuid >= 0) {
			processor = processor_array[cpuid];
			return processor;
		}

		/*
		 * Otherwise, enumerate active and idle processors to find primary candidates
		 * with lower priority/etc.
		 */

		uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
		    pset->recommended_bitmask &
		    ~pset->pending_AST_URGENT_cpu_mask);

		if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
			active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
		}

		active_map = bit_ror64(active_map, (pset->last_chosen + 1));
		for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
			cpuid = ((rotid + pset->last_chosen + 1) & 63);
			processor = processor_array[cpuid];

			integer_t cpri = processor->current_pri;
			processor_t primary = processor->processor_primary;
			if (primary != processor) {
				/* If primary is running a NO_SMT thread, don't choose its secondary */
				if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
					if (cpri < lowest_secondary_priority) {
						lowest_secondary_priority = cpri;
						lp_paired_secondary_processor = processor;
					}
				}
			} else {
				if (cpri < lowest_priority) {
					lowest_priority = cpri;
					lp_processor = processor;
				}
			}

			integer_t ccount = SCHED(processor_runq_count)(processor);
			if (ccount < lowest_count) {
				lowest_count = ccount;
				lc_processor = processor;
			}
		}

		/*
		 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
		 * the idle primary would have short-circuited the loop above
		 */
		uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
		    ~pset->primary_map &
		    pset->recommended_bitmask);

		/* there shouldn't be a pending AST if the processor is idle */
		assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
		assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);

		for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
			processor = processor_array[cpuid];

			processor_t cprimary = processor->processor_primary;

			integer_t primary_pri = cprimary->current_pri;

			/*
			 * TODO: This should also make the same decisions
			 * as secondary_can_run_realtime_thread
			 *
			 * TODO: Keep track of the pending preemption priority
			 * of the primary to make this more accurate.
			 */

			/* If the primary is running a no-smt thread, then don't choose its secondary */
			if (cprimary->state == PROCESSOR_RUNNING &&
			    processor_active_thread_no_smt(cprimary)) {
				continue;
			}

			/*
			 * Find the idle secondary processor with the lowest priority primary
			 *
			 * We will choose this processor as a fallback if we find no better
			 * primary to preempt.
			 */
			if (primary_pri < lowest_idle_secondary_priority) {
				lp_idle_secondary_processor = processor;
				lowest_idle_secondary_priority = primary_pri;
			}

			/* Find the lowest priority active primary with idle secondary */
			if (primary_pri < lowest_unpaired_primary_priority) {
				/* If the primary processor is offline or starting up, it's not a candidate for this path */
				if (cprimary->state != PROCESSOR_RUNNING &&
				    cprimary->state != PROCESSOR_DISPATCHING) {
					continue;
				}

				if (!cprimary->is_recommended) {
					continue;
				}

				/* if the primary is pending preemption, don't try to re-preempt it */
				if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
					continue;
				}

				if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
				    bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
					continue;
				}

				lowest_unpaired_primary_priority = primary_pri;
				lp_unpaired_primary_processor = cprimary;
			}
		}

		/*
		 * We prefer preempting a primary processor over waking up its secondary.
		 * The secondary will then be woken up by the preempted thread.
		 */
		if (thread->sched_pri > lowest_unpaired_primary_priority) {
			pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
			return lp_unpaired_primary_processor;
		}

		/*
		 * We prefer preempting a lower priority active processor over directly
		 * waking up an idle secondary.
		 * The preempted thread will then find the idle secondary.
		 */
		if (thread->sched_pri > lowest_priority) {
			pset->last_chosen = lp_processor->cpu_id;
			return lp_processor;
		}

		/*
		 * lc_processor is used to indicate the best processor set run queue
		 * on which to enqueue a thread when all available CPUs are busy with
		 * higher priority threads, so try to make sure it is initialized.
		 */
		if (lc_processor == PROCESSOR_NULL) {
			cpumap_t available_map = pset_available_cpumap(pset);
			cpuid = lsb_first(available_map);
			if (cpuid >= 0) {
				lc_processor = processor_array[cpuid];
				lowest_count = SCHED(processor_runq_count)(lc_processor);
			}
		}

		/*
		 * Move onto the next processor set.
		 *
		 * If all primary processors in this pset are running a higher
		 * priority thread, move on to next pset. Only when we have
		 * exhausted the search for primary processors do we
		 * fall back to secondaries.
		 */
#if CONFIG_SCHED_EDGE
		/*
		 * The edge scheduler expects a CPU to be selected from the pset it passed in
		 * as the starting pset for non-RT workloads. The edge migration algorithm
		 * should already have considered idle CPUs and loads to decide the starting_pset,
		 * which means that this loop can be short-circuited.
		 */
		nset = starting_pset;
#else /* CONFIG_SCHED_EDGE */
		nset = next_pset(pset);
#endif /* CONFIG_SCHED_EDGE */

		if (nset != starting_pset) {
			pset = change_locked_pset(pset, nset);
		}
	} while (nset != starting_pset);

	/*
	 * Make sure that we pick a running processor,
	 * and that the correct processor set is locked.
	 * Since we may have unlocked the candidate processor's
	 * pset, it may have changed state.
	 *
	 * All primary processors are running a higher priority
	 * thread, so the only options left are enqueuing on
	 * the secondary processor that would perturb the least priority
	 * primary, or the least busy primary.
	 */

	/* lowest_priority is evaluated in the main loops above */
	if (lp_idle_secondary_processor != PROCESSOR_NULL) {
		processor = lp_idle_secondary_processor;
	} else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
		processor = lp_paired_secondary_processor;
	} else if (lc_processor != PROCESSOR_NULL) {
		processor = lc_processor;
	} else {
		processor = PROCESSOR_NULL;
	}

	if (processor) {
		pset = change_locked_pset(pset, processor->processor_set);
		/* Check that chosen processor is still usable */
		cpumap_t available_map = pset_available_cpumap(pset);
		if (bit_test(available_map, processor->cpu_id)) {
			pset->last_chosen = processor->cpu_id;
			return processor;
		}

		/* processor is no longer usable */
		processor = PROCESSOR_NULL;
	}

	pset_assert_locked(pset);
	pset_unlock(pset);
	return PROCESSOR_NULL;
}
#else /* CONFIG_SCHED_SMT */
/*
 *	choose_processor:
 *
 *	Choose a processor for the thread, beginning at
 *	the pset.  Accepts an optional processor hint in
 *	the pset.
 *
 *	Returns a processor, possibly from a different pset.
 *
 *	The thread must be locked.  The pset must be locked,
 *	and the resulting pset is locked on return.
 */
processor_t
choose_processor(
	processor_set_t         starting_pset,
	processor_t             processor,
	thread_t                thread)
{
	processor_set_t pset = starting_pset;
	processor_set_t nset;

	assert(thread->sched_pri <= MAXPRI);

	/*
	 * At this point, we may have a processor hint, and we may have
	 * an initial starting pset. If the hint is not in the pset, or
	 * if the hint is for a processor in an invalid state, discard
	 * the hint.
	 */
	if (processor != PROCESSOR_NULL) {
		if (processor->processor_set != pset) {
			processor = PROCESSOR_NULL;
		} else if (!processor->is_recommended) {
			processor = PROCESSOR_NULL;
		} else {
			switch (processor->state) {
			case PROCESSOR_START:
			case PROCESSOR_PENDING_OFFLINE:
			case PROCESSOR_OFF_LINE:
				/*
				 * Hint is for a processor that cannot support running new threads.
				 */
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_IDLE:
				/*
				 * Hint is for an idle processor. Assume it is no worse than any other
				 * idle processor. The platform layer had an opportunity to provide
				 * the "least cost idle" processor above.
				 */
				if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
					uint64_t idle_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->recommended_bitmask);
					uint64_t non_avoided_idle_map = idle_map & ~pset->perfcontrol_cpu_migration_bitmask;
					/*
					 * If the rotation bitmask to force a migration is set for this core and there's an idle core
					 * that needn't be avoided, don't continue running on the same core.
					 */
					if (!(bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id) && non_avoided_idle_map != 0)) {
						return processor;
					}
				}
				processor = PROCESSOR_NULL;
				break;
			case PROCESSOR_RUNNING:
			case PROCESSOR_DISPATCHING:
				/*
				 * Hint is for an active CPU. This fast-path allows
				 * realtime threads to preempt non-realtime threads
				 * to regain their previous executing processor.
				 */
				if (thread->sched_pri >= BASEPRI_RTQUEUES) {
					if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
						return processor;
					}
					processor = PROCESSOR_NULL;
				}

				/* Otherwise, use hint as part of search below */
				break;
			default:
				processor = PROCESSOR_NULL;
				break;
			}
		}
	}

	/*
	 * Iterate through the processor sets to locate
	 * an appropriate processor. Seed results with
	 * a last-processor hint, if available, so that
	 * a search must find something strictly better
	 * to replace it.
	 */

	integer_t lowest_priority = MAXPRI + 1;
	integer_t lowest_count = INT_MAX;
	processor_t lp_processor = PROCESSOR_NULL;
	processor_t lc_processor = PROCESSOR_NULL;

	if (processor != PROCESSOR_NULL) {
		/* All other states should be enumerated above. */
		assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
		assert(thread->sched_pri < BASEPRI_RTQUEUES);

		lowest_priority = processor->current_pri;
		lp_processor = processor;

		lowest_count = SCHED(processor_runq_count)(processor);
		lc_processor = processor;
	}

	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		pset_node_t node = pset->node;
		bool include_ast_urgent_pending_cpus = false;
		cpumap_t ast_urgent_pending;
try_again:
		ast_urgent_pending = 0;
		pset = change_locked_pset(pset, starting_pset);
		do {
			cpumap_t available_map = pset_available_cpumap(pset);
			if (available_map == 0) {
				goto no_available_cpus;
			}

			processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, false);
			if (processor) {
				return processor;
			}

			processor = choose_furthest_deadline_processor_for_realtime_thread(pset, thread->sched_pri, thread->realtime.deadline, PROCESSOR_NULL, false, include_ast_urgent_pending_cpus);
			if (processor) {
				/*
				 * Instead of looping through all the psets to find the global
				 * furthest deadline processor, preempt the first candidate found.
				 * The preempted thread will then find any other available far deadline
				 * processors to preempt.
				 */
				return processor;
			}

			ast_urgent_pending |= pset->pending_AST_URGENT_cpu_mask;

			if (rt_runq_count(pset) < lowest_count) {
				int cpuid = bit_first(available_map);
				assert(cpuid >= 0);
				lc_processor = processor_array[cpuid];
				lowest_count = rt_runq_count(pset);
			}

no_available_cpus:
			nset = next_pset(pset);

			if (nset != starting_pset) {
				pset = change_locked_pset(pset, nset);
			}
		} while (nset != starting_pset);


		/* Short cut for single pset nodes */
		if (bit_count(node->pset_map) == 1) {
			if (lc_processor) {
				pset_assert_locked(lc_processor->processor_set);
				return lc_processor;
			}
		} else {
			if (ast_urgent_pending && !include_ast_urgent_pending_cpus) {
				/* See the comment in choose_furthest_deadline_processor_for_realtime_thread() */
				include_ast_urgent_pending_cpus = true;
				goto try_again;
			}
		}

		processor = lc_processor;

		if (processor) {
			pset = change_locked_pset(pset, processor->processor_set);
			/* Check that chosen processor is still usable */
			cpumap_t available_map = pset_available_cpumap(pset);
			if (bit_test(available_map, processor->cpu_id)) {
				return processor;
			}

			/* processor is no longer usable */
			processor = PROCESSOR_NULL;
		}

		pset_assert_locked(pset);
		pset_unlock(pset);
		return PROCESSOR_NULL;
	}

	/* No realtime threads from this point on */
	assert(thread->sched_pri < BASEPRI_RTQUEUES);

	do {
		/*
		 * Choose an idle processor, in pset traversal order
		 */
		uint64_t idle_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->recommended_bitmask);
		uint64_t preferred_idle_map = idle_map & pset->perfcontrol_cpu_preferred_bitmask;

		/* there shouldn't be a pending AST if the processor is idle */
		assert((idle_map & pset->pending_AST_URGENT_cpu_mask) == 0);

		/*
		 * Look at the preferred cores first.
		 */
		int cpuid = lsb_next(preferred_idle_map, pset->cpu_preferred_last_chosen);
		if (cpuid < 0) {
			cpuid = lsb_first(preferred_idle_map);
		}
		if (cpuid >= 0) {
			processor = processor_array[cpuid];
			pset->cpu_preferred_last_chosen = cpuid;
			return processor;
		}

		/*
		 * Look at the cores that don't need to be avoided next.
		 */
		if (pset->perfcontrol_cpu_migration_bitmask != 0) {
			uint64_t non_avoided_idle_map = idle_map & ~pset->perfcontrol_cpu_migration_bitmask;
			cpuid = lsb_next(non_avoided_idle_map, pset->cpu_preferred_last_chosen);
			if (cpuid < 0) {
				cpuid = lsb_first(non_avoided_idle_map);
			}
			if (cpuid >= 0) {
				processor = processor_array[cpuid];
				pset->cpu_preferred_last_chosen = cpuid;
				return processor;
			}
		}

		/*
		 * Fall back to any remaining idle cores if none of the preferred ones and non-avoided ones are available.
		 */
		cpuid = lsb_first(idle_map);
		if (cpuid >= 0) {
			processor = processor_array[cpuid];
			return processor;
		}

		/*
		 * Otherwise, enumerate active and idle processors to find primary candidates
		 * with lower priority/etc.
		 */

		uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
		    pset->recommended_bitmask &
		    ~pset->pending_AST_URGENT_cpu_mask);

		if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
			active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
		}

		active_map = bit_ror64(active_map, (pset->last_chosen + 1));
		for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
			cpuid = ((rotid + pset->last_chosen + 1) & 63);
			processor = processor_array[cpuid];

			integer_t cpri = processor->current_pri;
			if (cpri < lowest_priority) {
				lowest_priority = cpri;
				lp_processor = processor;
			}

			integer_t ccount = SCHED(processor_runq_count)(processor);
			if (ccount < lowest_count) {
				lowest_count = ccount;
				lc_processor = processor;
			}
		}

		/*
		 * We prefer preempting a lower priority active processor over directly
		 * waking up an idle secondary.
		 * The preempted thread will then find the idle secondary.
		 */
		if (thread->sched_pri > lowest_priority) {
			pset->last_chosen = lp_processor->cpu_id;
			return lp_processor;
		}

		/*
		 * lc_processor is used to indicate the best processor set run queue
		 * on which to enqueue a thread when all available CPUs are busy with
		 * higher priority threads, so try to make sure it is initialized.
		 */
		if (lc_processor == PROCESSOR_NULL) {
			cpumap_t available_map = pset_available_cpumap(pset);
			cpuid = lsb_first(available_map);
			if (cpuid >= 0) {
				lc_processor = processor_array[cpuid];
				lowest_count = SCHED(processor_runq_count)(lc_processor);
			}
		}

		/*
		 * Move onto the next processor set.
		 *
		 * If all primary processors in this pset are running a higher
		 * priority thread, move on to next pset. Only when we have
		 * exhausted the search for primary processors do we
		 * fall back to secondaries.
		 */
#if CONFIG_SCHED_EDGE
		/*
		 * The edge scheduler expects a CPU to be selected from the pset it passed in
		 * as the starting pset for non-RT workloads. The edge migration algorithm
		 * should already have considered idle CPUs and loads to decide the starting_pset,
		 * which means that this loop can be short-circuited.
		 */
		nset = starting_pset;
#else /* CONFIG_SCHED_EDGE */
		nset = next_pset(pset);
#endif /* CONFIG_SCHED_EDGE */

		if (nset != starting_pset) {
			pset = change_locked_pset(pset, nset);
		}
	} while (nset != starting_pset);

	processor = lc_processor;

	if (processor) {
		pset = change_locked_pset(pset, processor->processor_set);
		/* Check that chosen processor is still usable */
		cpumap_t available_map = pset_available_cpumap(pset);
		if (bit_test(available_map, processor->cpu_id)) {
			pset->last_chosen = processor->cpu_id;
			return processor;
		}

		/* processor is no longer usable */
		processor = PROCESSOR_NULL;
	}

	pset_assert_locked(pset);
	pset_unlock(pset);
	return PROCESSOR_NULL;
}
#endif /* CONFIG_SCHED_SMT */



/*
 * Default implementation of SCHED(choose_node)()
 * for single node systems
 */
pset_node_t
sched_choose_node(__unused thread_t thread)
{
	return &pset_node0;
}

/*
 *	choose_starting_pset:
 *
 *	Choose a starting processor set for the thread.
 *	May return a processor hint within the pset.
 *
 *	Returns a starting processor set, to be used by
 *	choose_processor.
 *
 *	The thread must be locked.  The resulting pset is unlocked on return,
 *	and is chosen without taking any pset locks.
 */
processor_set_t
choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
{
	processor_set_t pset;
	processor_t processor = PROCESSOR_NULL;

	if (thread->affinity_set != AFFINITY_SET_NULL) {
		/*
		 * Use affinity set policy hint.
		 */
		pset = thread->affinity_set->aset_pset;
	} else if (thread->last_processor != PROCESSOR_NULL) {
		/*
		 *	Simple (last processor) affinity case.
		 */
		processor = thread->last_processor;
		pset = processor->processor_set;
	} else {
		/*
		 *	No affinity case:
		 *
		 *	Utilize a per-task hint to spread threads
		 *	among the available processor sets.
		 * NRG this seems like the wrong thing to do.
		 * See also task->pset_hint = pset in thread_setrun()
		 */
		pset = get_threadtask(thread)->pset_hint;
		if (pset == PROCESSOR_SET_NULL) {
			pset = current_processor()->processor_set;
		}

		pset = choose_next_pset(pset);
	}

	if (!bit_test(node->pset_map, pset->pset_id)) {
		/* pset is not from this node so choose one that is */
		int id = lsb_first(node->pset_map);
		if (id < 0) {
			/* startup race, so check again under the node lock */
			lck_spin_lock(&pset_node_lock);
			if (bit_test(node->pset_map, pset->pset_id)) {
				id = pset->pset_id;
			} else {
				id = lsb_first(node->pset_map);
			}
			lck_spin_unlock(&pset_node_lock);
		}
		assert(id >= 0);
		pset = pset_array[id];
	}

	if (bit_count(node->pset_map) == 1) {
		/* Only a single pset in this node */
		goto out;
	}

	bool avoid_cpu0 = false;

#if defined(__x86_64__)
	if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
		/* Avoid the pset containing cpu0 */
		avoid_cpu0 = true;
		/* Assert that cpu0 is in pset0.  I expect this to be true on __x86_64__ */
		assert(bit_test(pset_array[0]->cpu_bitmask, 0));
	}
#endif

	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
		pset_map_t rt_target_map;
#if CONFIG_SCHED_SMT
		rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
			if (avoid_cpu0) {
				rt_target_map = bit_ror64(rt_target_map, 1);
			}
			int rotid = lsb_first(rt_target_map);
			if (rotid >= 0) {
				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
				pset = pset_array[id];
				goto out;
			}
		}
		if (!pset->is_SMT || !sched_allow_rt_smt) {
			/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
			goto out;
		}
#endif /* CONFIG_SCHED_SMT */
		rt_target_map = atomic_load(&node->pset_non_rt_map);
		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
			if (avoid_cpu0) {
				rt_target_map = bit_ror64(rt_target_map, 1);
			}
			int rotid = lsb_first(rt_target_map);
			if (rotid >= 0) {
				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
				pset = pset_array[id];
				goto out;
			}
		}
		/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
	} else {
		pset_map_t idle_map = atomic_load(&node->pset_idle_map);
		if (!bit_test(idle_map, pset->pset_id)) {
			int next_idle_pset_id = lsb_first(idle_map);
			if (next_idle_pset_id >= 0) {
				pset = pset_array[next_idle_pset_id];
			}
		}
	}

out:
	if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
		processor = PROCESSOR_NULL;
	}
	if (processor != PROCESSOR_NULL) {
		*processor_hint = processor;
	}

	assert(pset != NULL);
	return pset;
}

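/*
 * Worked example of the avoid_cpu0 rotation above (illustrative):
 * suppose rt_target_map has bits 0 and 2 set. bit_ror64(map, 1) moves
 * bit 0 to bit 63 and bit 2 to bit 1, so lsb_first() returns 1, and
 * undoing the rotation with ((rotid + 1) & 63) selects pset 2. The
 * pset containing cpu0 is therefore chosen only when no other pset
 * has a bit set in the target map.
 */
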
6306 /*
6307  *	thread_setrun:
6308  *
6309  *	Dispatch thread for execution, onto an idle
6310  *	processor or run queue, and signal a preemption
6311  *	as appropriate.
6312  *
6313  *	Thread must be locked.
6314  */
6315 void
thread_setrun(thread_t thread,sched_options_t options)6316 thread_setrun(
6317 	thread_t                        thread,
6318 	sched_options_t                 options)
6319 {
6320 	processor_t                     processor = PROCESSOR_NULL;
6321 	processor_set_t         pset;
6322 
6323 	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
6324 	thread_assert_runq_null(thread);
6325 
6326 #if CONFIG_PREADOPT_TG
6327 	/* We know that the thread is not in the runq by virtue of being in this
6328 	 * function and the thread is not self since we are running. We can safely
6329 	 * resolve the thread group hierarchy and modify the thread's thread group
6330 	 * here. */
6331 	thread_resolve_and_enforce_thread_group_hierarchy_if_needed(thread);
6332 #endif
6333 
6334 	/*
6335 	 *	Update priority if needed.
6336 	 */
6337 	if (SCHED(can_update_priority)(thread)) {
6338 		SCHED(update_priority)(thread);
6339 	}
6340 	thread->sfi_class = sfi_thread_classify(thread);
6341 
6342 	if (thread->bound_processor == PROCESSOR_NULL) {
6343 		/*
6344 		 * Unbound case.
6345 		 *
6346 		 * Usually, this loop will only be executed once,
6347 		 * but if CLPC derecommends a processor after it has been chosen,
6348 		 * or if a processor is shut down after it is chosen,
6349 		 * choose_processor() may return NULL, so a retry
6350 		 * may be necessary.  A single retry will usually
6351 		 * be enough, and we can't afford to retry too many times
6352 		 * because interrupts are disabled.
6353 		 */
6354 #define CHOOSE_PROCESSOR_MAX_RETRIES 3
6355 		for (int retry = 0; retry <= CHOOSE_PROCESSOR_MAX_RETRIES; retry++) {
6356 			processor_t processor_hint = PROCESSOR_NULL;
6357 			pset_node_t node = SCHED(choose_node)(thread);
6358 			processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
6359 
6360 			pset_lock(starting_pset);
6361 
6362 			processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
6363 			if (processor != PROCESSOR_NULL) {
6364 				pset = processor->processor_set;
6365 				pset_assert_locked(pset);
6366 				break;
6367 			}
6368 		}
6369 		/*
6370 		 * If choose_processor() still returns NULL,
6371 		 * which is very unlikely, we need a fallback.
6372 		 */
6373 		if (processor == PROCESSOR_NULL) {
6374 			bool unlock_available_cores_lock = false;
6375 			if (sched_all_cpus_offline()) {
6376 				/*
6377 				 * There are no available processors
6378 				 * because we're in final system shutdown.
6379 				 * Enqueue on the master processor and we'll
6380 				 * handle it when it powers back up.
6381 				 */
6382 				processor = master_processor;
6383 			} else if (support_bootcpu_shutdown) {
6384 				/*
6385 				 * Grab the sched_available_cores_lock to select
6386 				 * some available processor and prevent it from
6387 				 * becoming offline while we enqueue the thread.
6388 				 *
6389 				 * This is very close to a lock inversion, but
6390 				 * places that do call thread_setrun with this
6391 				 * lock held know that the current cpu will be
6392 				 * schedulable, so we won't fall out of
6393 				 * choose_processor.
6394 				 */
6395 				simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
6396 				unlock_available_cores_lock = true;
6397 
6398 				int last_resort_cpu = sched_last_resort_cpu();
6399 
6400 				processor = processor_array[last_resort_cpu];
6401 			} else {
6402 				/*
6403 				 * The master processor is never shut down, always safe to choose.
6404 				 */
6405 				processor = master_processor;
6406 			}
6407 			pset = processor->processor_set;
6408 			pset_lock(pset);
6409 			assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
6410 			if (unlock_available_cores_lock) {
6411 				simple_unlock(&sched_available_cores_lock);
6412 			}
6413 		}
6414 		task_t task = get_threadtask(thread);
6415 		if (!(task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE)) {
6416 			task->pset_hint = pset; /* NRG this is done without holding the task lock */
6417 		}
6418 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
6419 		    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
6420 		assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
6421 	} else {
6422 		/*
6423 		 *	Bound case:
6424 		 *
6425 		 *	Unconditionally dispatch on the processor.
6426 		 */
6427 		processor = thread->bound_processor;
6428 		pset = processor->processor_set;
6429 		pset_lock(pset);
6430 
6431 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
6432 		    (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
6433 	}
6434 
6435 	/*
6436 	 *	Dispatch the thread on the chosen processor.
6437 	 *	TODO: This should be based on sched_mode, not sched_pri
6438 	 */
6439 	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
6440 		realtime_setrun(processor, thread);
6441 	} else {
6442 		processor_setrun(processor, thread, options);
6443 	}
6444 	/* pset is now unlocked */
6445 	if (thread->bound_processor == PROCESSOR_NULL) {
6446 		SCHED(check_spill)(pset, thread);
6447 	}
6448 }
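
/*
 * Illustrative sketch (not compiled): the unbound path above is a
 * bounded-retry-then-fallback pattern: try choose_processor() a few times
 * with interrupts disabled, then fall back to a CPU that is guaranteed
 * schedulable. try_choose and fallback_cpu are hypothetical stand-ins.
 */
#if 0
static int
sketch_pick_cpu(int (*try_choose)(void), int fallback_cpu, int max_retries)
{
	for (int retry = 0; retry <= max_retries; retry++) {
		int cpu = try_choose();         /* may fail under derecommendation */
		if (cpu >= 0) {
			return cpu;
		}
	}
	return fallback_cpu;                    /* master or last-resort CPU */
}
#endif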
6449 
6450 processor_set_t
6451 task_choose_pset(
6452 	task_t          task)
6453 {
6454 	processor_set_t         pset = task->pset_hint;
6455 
6456 	if (pset != PROCESSOR_SET_NULL) {
6457 		pset = choose_next_pset(pset);
6458 	}
6459 
6460 	return pset;
6461 }
6462 
6463 /*
6464  *	Check for a preemption point in
6465  *	the current context.
6466  *
6467  *	Called at splsched with thread locked.
6468  */
6469 ast_t
6470 csw_check(
6471 	thread_t                thread,
6472 	processor_t             processor,
6473 	ast_t                   check_reason)
6474 {
6475 	processor_set_t pset = processor->processor_set;
6476 
6477 	assert(thread == processor->active_thread);
6478 
6479 	pset_lock(pset);
6480 
6481 	processor_state_update_from_thread(processor, thread, true);
6482 
6483 	ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
6484 
6485 	/* Acknowledge the IPI if we decided not to preempt */
6486 
6487 	if ((preempt & AST_URGENT) == 0) {
6488 		if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
6489 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 8);
6490 		}
6491 	}
6492 
6493 	if ((preempt & AST_PREEMPT) == 0) {
6494 		bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
6495 	}
6496 
6497 	pset_unlock(pset);
6498 
6499 	return update_pending_nonurgent_preemption(processor, preempt);
6500 }
6501 
6502 void
6503 clear_pending_nonurgent_preemption(processor_t processor)
6504 {
6505 	if (!processor->pending_nonurgent_preemption) {
6506 		return;
6507 	}
6508 
6509 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_END);
6510 
6511 	processor->pending_nonurgent_preemption = false;
6512 	running_timer_clear(processor, RUNNING_TIMER_PREEMPT);
6513 }
6514 
6515 ast_t
6516 update_pending_nonurgent_preemption(processor_t processor, ast_t reason)
6517 {
6518 	if ((reason & (AST_URGENT | AST_PREEMPT)) != (AST_PREEMPT)) {
6519 		clear_pending_nonurgent_preemption(processor);
6520 		return reason;
6521 	}
6522 
6523 	if (nonurgent_preemption_timer_abs == 0) {
6524 		/* Preemption timer not enabled */
6525 		return reason;
6526 	}
6527 
6528 	if (current_thread()->state & TH_IDLE) {
6529 		/* idle threads don't need nonurgent preemption */
6530 		return reason;
6531 	}
6532 
6533 	if (processor->pending_nonurgent_preemption) {
6534 		/* Timer is already armed, no need to do it again */
6535 		return reason;
6536 	}
6537 
6538 	if (ml_did_interrupt_userspace()) {
6539 		/*
6540 		 * We're preempting userspace here, so we don't need
6541 		 * to defer the preemption.  Force AST_URGENT
6542 		 * so that we can avoid arming this timer without risking
6543 		 * ast_taken_user deciding to spend too long in kernel
6544 		 * space to handle other ASTs.
6545 		 */
6546 
6547 		return reason | AST_URGENT;
6548 	}
6549 
6550 	/*
6551 	 * We've decided to do a nonurgent preemption when running in
6552 	 * kernelspace. We defer the preemption until reaching userspace boundary
6553 	 * to give a grace period for locks etc to be dropped and to reach
6554 	 * a clean preemption point, so that the preempting thread doesn't
6555 	 * always immediately hit the lock that the waking thread still holds.
6556 	 *
6557 	 * Arm a timer to enforce that the preemption executes within a bounded
6558 	 * time if the thread doesn't block or return to userspace quickly.
6559 	 */
6560 
6561 	processor->pending_nonurgent_preemption = true;
6562 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_START,
6563 	    reason);
6564 
6565 	uint64_t now = mach_absolute_time();
6566 
6567 	uint64_t deadline = now + nonurgent_preemption_timer_abs;
6568 
6569 	running_timer_enter(processor, RUNNING_TIMER_PREEMPT, NULL,
6570 	    now, deadline);
6571 
6572 	return reason;
6573 }
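
/*
 * Illustrative sketch (not compiled): the predicate above treats only
 * AST_PREEMPT-without-AST_URGENT as deferrable, and the timer arms a
 * one-shot deadline of now + nonurgent_preemption_timer_abs. The SK_*
 * constants are stand-ins, not the kernel's AST values.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define SK_AST_PREEMPT  0x01u
#define SK_AST_URGENT   0x02u

static bool
sketch_is_deferrable(uint32_t reason)
{
	/* preemption requested, but not urgent */
	return (reason & (SK_AST_URGENT | SK_AST_PREEMPT)) == SK_AST_PREEMPT;
}

static uint64_t
sketch_enforcement_deadline(uint64_t now, uint64_t grace_abs)
{
	/* mirrors the running_timer_enter() deadline computed above */
	return now + grace_abs;
}
#endif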
6574 
6575 /*
6576  * Check for preemption at splsched with
6577  * pset locked and processor as the current
6578  * processor.
6579  */
6580 ast_t
6581 csw_check_locked(
6582 	thread_t                thread,
6583 	processor_t             processor,
6584 	processor_set_t         pset,
6585 	ast_t                   check_reason)
6586 {
6587 	assert(processor == current_processor());
6588 	/*
6589 	 * If the current thread is running on a processor that is no longer recommended,
6590 	 * urgently preempt it, at which point thread_select() should
6591 	 * try to idle the processor and re-dispatch the thread to a recommended processor.
6592 	 */
6593 	if (!processor->is_recommended) {
6594 		return check_reason | AST_PREEMPT | AST_URGENT;
6595 	}
6596 
6597 	if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
6598 		return check_reason | AST_PREEMPT | AST_URGENT;
6599 	}
6600 
6601 	if (rt_runq_count(pset) > 0) {
6602 		if ((rt_runq_priority(pset) > processor->current_pri) || !processor->first_timeslice) {
6603 			return check_reason | AST_PREEMPT | AST_URGENT;
6604 		} else if (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < processor->deadline) {
6605 			return check_reason | AST_PREEMPT | AST_URGENT;
6606 		} else {
6607 			return check_reason | AST_PREEMPT;
6608 		}
6609 	}
6610 
6611 	ast_t result = SCHED(processor_csw_check)(processor);
6612 	if (result != AST_NONE) {
6613 		return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
6614 	}
6615 
6616 	/*
6617 	 * Same for avoid-processor
6618 	 *
6619 	 * TODO: Should these set AST_REBALANCE?
6620 	 */
6621 	if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, check_reason)) {
6622 		return check_reason | AST_PREEMPT;
6623 	}
6624 
6625 #if CONFIG_SCHED_SMT
6626 	/*
6627 	 * Even though we could continue executing on this processor, a
6628 	 * secondary SMT core should try to shed load to another primary core.
6629 	 *
6630 	 * TODO: Should this do the same check that thread_select does? i.e.
6631 	 * if no bound threads target this processor, and idle primaries exist, preempt
6632 	 * The case of RT threads existing is already taken care of above
6633 	 */
6634 
6635 	if (processor->current_pri < BASEPRI_RTQUEUES &&
6636 	    processor->processor_primary != processor) {
6637 		return check_reason | AST_PREEMPT;
6638 	}
6639 #endif /* CONFIG_SCHED_SMT*/
6640 
6641 	if (thread->state & TH_SUSP) {
6642 		return check_reason | AST_PREEMPT;
6643 	}
6644 
6645 #if CONFIG_SCHED_SFI
6646 	/*
6647 	 * Current thread may not need to be preempted, but maybe needs
6648 	 * an SFI wait?
6649 	 */
6650 	result = sfi_thread_needs_ast(thread, NULL);
6651 	if (result != AST_NONE) {
6652 		return result;
6653 	}
6654 #endif
6655 
6656 	return AST_NONE;
6657 }
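
/*
 * Illustrative sketch (not compiled): the realtime branch above is
 * earliest-deadline-first with hysteresis; the queued thread must beat the
 * running thread's deadline by more than rt_deadline_epsilon before an
 * urgent preemption is taken. The saturating add mirrors the spirit of
 * deadline_add(), with UINT64_MAX standing for "no deadline".
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint64_t
sketch_deadline_add(uint64_t deadline, uint64_t epsilon)
{
	return (deadline > UINT64_MAX - epsilon) ? UINT64_MAX : deadline + epsilon;
}

static bool
sketch_rt_should_preempt(uint64_t queued_deadline, uint64_t running_deadline,
    uint64_t epsilon)
{
	/* preempt only when clearly earlier, avoiding deadline ping-pong */
	return sketch_deadline_add(queued_deadline, epsilon) < running_deadline;
}
#endif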
6658 
6659 /*
6660  * Handle preemption IPI or IPI in response to setting an AST flag
6661  * Triggered by cause_ast_check
6662  * Called at splsched
6663  */
6664 void
6665 ast_check(processor_t processor)
6666 {
6667 	smr_ack_ipi();
6668 
6669 	if (processor->state != PROCESSOR_RUNNING) {
6670 		return;
6671 	}
6672 
6673 	SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
6674 	    MACH_SCHED_AST_CHECK) | DBG_FUNC_START);
6675 
6676 	thread_t thread = processor->active_thread;
6677 
6678 	assert(thread == current_thread());
6679 
6680 	/*
6681 	 * Pairs with task_restartable_ranges_synchronize
6682 	 */
6683 	thread_lock(thread);
6684 
6685 	thread_reset_pcs_ack_IPI(thread);
6686 
6687 	/*
6688 	 * Propagate thread ast to processor.
6689 	 * (handles IPI in response to setting AST flag)
6690 	 */
6691 	ast_propagate(thread);
6692 
6693 	/*
6694 	 * Stash the old urgency and perfctl values to find out if
6695 	 * csw_check updates them.
6696 	 */
6697 	thread_urgency_t old_urgency = processor->current_urgency;
6698 	perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
6699 
6700 	ast_t preempt;
6701 
6702 	if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
6703 		ast_on(preempt);
6704 	}
6705 
6706 	if (old_urgency != processor->current_urgency) {
6707 		/*
6708 		 * Urgency updates happen with the thread lock held (ugh).
6709 		 * TODO: This doesn't notice QoS changes...
6710 		 */
6711 		uint64_t urgency_param1, urgency_param2;
6712 
6713 		thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
6714 		thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
6715 	}
6716 
6717 	thread_unlock(thread);
6718 
6719 	if (old_perfctl_class != processor->current_perfctl_class) {
6720 		/*
6721 		 * We updated the perfctl class of this thread from another core.
6722 		 * Let CLPC know that the currently running thread has a new
6723 		 * class.
6724 		 */
6725 
6726 		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
6727 		    mach_approximate_time(), 0, thread);
6728 	}
6729 
6730 	SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
6731 	    MACH_SCHED_AST_CHECK) | DBG_FUNC_END, preempt);
6732 }
6733 
6734 
6735 void
6736 thread_preempt_expire(
6737 	timer_call_param_t      p0,
6738 	__unused timer_call_param_t      p1)
6739 {
6740 	processor_t processor = p0;
6741 
6742 	assert(processor == current_processor());
6743 	assert(p1 == NULL);
6744 
6745 	thread_t thread = current_thread();
6746 
6747 	/*
6748 	 * This is set and cleared by the current core, so we will
6749 	 * never see a race with running timer expiration
6750 	 */
6751 	assert(processor->pending_nonurgent_preemption);
6752 
6753 	clear_pending_nonurgent_preemption(processor);
6754 
6755 	thread_lock(thread);
6756 
6757 	/*
6758 	 * Check again to see if it's still worth a
6759 	 * context switch, but this time force enable kernel preemption
6760 	 */
6761 
6762 	ast_t preempt = csw_check(thread, processor, AST_URGENT);
6763 
6764 	if (preempt) {
6765 		ast_on(preempt);
6766 	}
6767 
6768 	thread_unlock(thread);
6769 
6770 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE), preempt);
6771 }
6772 
6773 
6774 /*
6775  *	set_sched_pri:
6776  *
6777  *	Set the scheduled priority of the specified thread.
6778  *
6779  *	This may cause the thread to change queues.
6780  *
6781  *	Thread must be locked.
6782  */
6783 void
6784 set_sched_pri(
6785 	thread_t        thread,
6786 	int16_t         new_priority,
6787 	set_sched_pri_options_t options)
6788 {
6789 	bool is_current_thread = (thread == current_thread());
6790 	bool removed_from_runq = false;
6791 	bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
6792 
6793 	int16_t old_priority = thread->sched_pri;
6794 
6795 	/* If we're already at this priority, no need to mess with the runqueue */
6796 	if (new_priority == old_priority) {
6797 #if CONFIG_SCHED_CLUTCH
6798 		/* For the first thread in the system, the priority is correct but
6799 		 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
6800 		 * scheduler relies on the bucket being set for all threads, update
6801 		 * its bucket here.
6802 		 */
6803 		if (thread->th_sched_bucket == TH_BUCKET_RUN) {
6804 			assert(thread == vm_pageout_scan_thread);
6805 			SCHED(update_thread_bucket)(thread);
6806 		}
6807 #endif /* CONFIG_SCHED_CLUTCH */
6808 
6809 		return;
6810 	}
6811 
6812 	if (is_current_thread) {
6813 		assert(thread->state & TH_RUN);
6814 		thread_assert_runq_null(thread);
6815 	} else {
6816 		removed_from_runq = thread_run_queue_remove(thread);
6817 	}
6818 
6819 	thread->sched_pri = new_priority;
6820 
6821 #if CONFIG_SCHED_CLUTCH
6822 	/*
6823 	 * Since for the clutch scheduler, the thread's bucket determines its runq
6824 	 * in the hierarchy it is important to update the bucket when the thread
6825 	 * lock is held and the thread has been removed from the runq hierarchy.
6826 	 */
6827 	SCHED(update_thread_bucket)(thread);
6828 
6829 #endif /* CONFIG_SCHED_CLUTCH */
6830 
6831 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
6832 	    (uintptr_t)thread_tid(thread),
6833 	    thread->base_pri,
6834 	    thread->sched_pri,
6835 	    thread->sched_usage,
6836 	    0);
6837 
6838 	if (removed_from_runq) {
6839 		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
6840 	} else if (is_current_thread) {
6841 		processor_t processor = thread->last_processor;
6842 		assert(processor == current_processor());
6843 
6844 		thread_urgency_t old_urgency = processor->current_urgency;
6845 
6846 		/*
6847 		 * When dropping in priority, check if the thread no longer belongs on core.
6848 		 * If a thread raises its own priority, don't aggressively rebalance it.
6849 		 * <rdar://problem/31699165>
6850 		 *
6851 		 * csw_check does a processor_state_update_from_thread, but
6852 		 * we should do our own if we're being lazy.
6853 		 */
6854 		if (!lazy_update && new_priority < old_priority) {
6855 			ast_t preempt;
6856 
6857 			if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
6858 				ast_on(preempt);
6859 			}
6860 		} else {
6861 			processor_state_update_from_thread(processor, thread, false);
6862 		}
6863 
6864 		/*
6865 		 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
6866 		 * class alterations from user space to occur relatively infrequently, hence
6867 		 * those are lazily handled. QoS classes have distinct priority bands, and QoS
6868 		 * inheritance is expected to involve priority changes.
6869 		 */
6870 		if (processor->current_urgency != old_urgency) {
6871 			uint64_t urgency_param1, urgency_param2;
6872 
6873 			thread_urgency_t new_urgency = thread_get_urgency(thread,
6874 			    &urgency_param1, &urgency_param2);
6875 
6876 			thread_tell_urgency(new_urgency, urgency_param1,
6877 			    urgency_param2, 0, thread);
6878 		}
6879 
6880 		/* TODO: only call this if current_perfctl_class changed */
6881 		uint64_t ctime = mach_approximate_time();
6882 		machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
6883 	} else if (thread->state & TH_RUN) {
6884 		processor_t processor = thread->last_processor;
6885 
6886 		if (!lazy_update &&
6887 		    processor != PROCESSOR_NULL &&
6888 		    processor != current_processor() &&
6889 		    processor->active_thread == thread) {
6890 			cause_ast_check(processor);
6891 		}
6892 	}
6893 }
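
/*
 * Illustrative sketch (not compiled): set_sched_pri() above only runs the
 * full csw_check() when a non-lazy update drops the priority; raises are
 * handled lazily so a thread that boosts itself is not aggressively
 * rebalanced off core. The helper name is hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
sketch_needs_csw_check(int16_t old_pri, int16_t new_pri, bool lazy)
{
	/* only a non-lazy priority drop can make the thread lose its core */
	return !lazy && (new_pri < old_pri);
}
#endif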
6894 
6895 /*
6896  * thread_run_queue_remove_for_handoff
6897  *
6898  * Pull a thread or its (recursive) push target out of the runqueue
6899  * so that it is ready for thread_run()
6900  *
6901  * Called at splsched
6902  *
6903  * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6904  * This may be different than the thread that was passed in.
6905  */
6906 thread_t
6907 thread_run_queue_remove_for_handoff(thread_t thread)
6908 {
6909 	thread_t pulled_thread = THREAD_NULL;
6910 
6911 	thread_lock(thread);
6912 
6913 	/*
6914 	 * Check that the thread is not bound to a different processor,
6915 	 * that the NO_SMT flag is not set on the thread, that the
6916 	 * processor's cluster type matches the thread's pinned cluster
6917 	 * (if any), and that realtime is not involved.
6918 	 *
6919 	 * Next, pull it off its run queue.  If it doesn't come, it's not eligible.
6920 	 */
6921 	processor_t processor = current_processor();
6922 	if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6923 #if CONFIG_SCHED_SMT
6924 	    && (!thread_no_smt(thread))
6925 #endif /* CONFIG_SCHED_SMT */
6926 	    && (processor->current_pri < BASEPRI_RTQUEUES)
6927 	    && (thread->sched_pri < BASEPRI_RTQUEUES)
6928 #if __AMP__
6929 	    && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6930 	    processor->processor_set->pset_id == thread->th_bound_cluster_id)
6931 #endif /* __AMP__ */
6932 	    ) {
6933 		if (thread_run_queue_remove(thread)) {
6934 			pulled_thread = thread;
6935 		}
6936 	}
6937 
6938 	thread_unlock(thread);
6939 
6940 	return pulled_thread;
6941 }
6942 
6943 /*
6944  * thread_prepare_for_handoff
6945  *
6946  * Make the thread ready for handoff.
6947  * If the thread was runnable then pull it off the runq, if the thread could
6948  * not be pulled, return NULL.
6949  *
6950  * If the thread was woken up from wait for handoff, make sure it is not bound to
6951  * different processor.
6952  *
6953  * Called at splsched
6954  *
6955  * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6956  * This may be different than the thread that was passed in.
6957  */
6958 thread_t
6959 thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
6960 {
6961 	thread_t pulled_thread = THREAD_NULL;
6962 
6963 	if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
6964 		processor_t processor = current_processor();
6965 		thread_lock(thread);
6966 
6967 		/*
6968 		 * Check that the thread is not bound to a different processor,
6969 		 * that the NO_SMT flag is not set on the thread, and that the
6970 		 * processor's cluster type matches the thread's pinned cluster
6971 		 * (if any). Call setrun instead if the above conditions are
6972 		 * not satisfied.
6973 		 */
6974 		if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6975 #if CONFIG_SCHED_SMT
6976 		    && (!thread_no_smt(thread))
6977 #endif /* CONFIG_SCHED_SMT */
6978 #if __AMP__
6979 		    && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6980 		    processor->processor_set->pset_id == thread->th_bound_cluster_id)
6981 #endif /* __AMP__ */
6982 		    ) {
6983 			pulled_thread = thread;
6984 		} else {
6985 			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
6986 		}
6987 		thread_unlock(thread);
6988 	} else {
6989 		pulled_thread = thread_run_queue_remove_for_handoff(thread);
6990 	}
6991 
6992 	return pulled_thread;
6993 }
6994 
6995 /*
6996  *	thread_run_queue_remove:
6997  *
6998  *	Remove a thread from its current run queue and
6999  *	return TRUE if successful.
7000  *
7001  *	Thread must be locked.
7002  *
7003  *	If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
7004  *	run queues because the caller locked the thread.  Otherwise
7005  *	the thread is on a run queue, but could be chosen for dispatch
7006  *	and removed by another processor under a different lock, which
7007  *	will set thread->runq to PROCESSOR_NULL.
7008  *
7009  *	Hence the thread select path must not rely on anything that could
7010  *	be changed under the thread lock after calling this function,
7011  *	most importantly thread->sched_pri.
7012  */
7013 boolean_t
7014 thread_run_queue_remove(
7015 	thread_t        thread)
7016 {
7017 	boolean_t removed = FALSE;
7018 
7019 	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
7020 		/* Thread isn't runnable */
7021 		thread_assert_runq_null(thread);
7022 		return FALSE;
7023 	}
7024 
7025 	processor_t processor = thread_get_runq(thread);
7026 	if (processor == PROCESSOR_NULL) {
7027 		/*
7028 		 * The thread is either not on the runq,
7029 		 * or is in the midst of being removed from the runq.
7030 		 *
7031 		 * runq is set to NULL under the pset lock, not the thread
7032 		 * lock, so the thread may still be in the process of being dequeued
7033 		 * from the runq. It will wait in invoke for the thread lock to be
7034 		 * dropped.
7035 		 */
7036 
7037 		return FALSE;
7038 	}
7039 
7040 	if (thread->sched_pri < BASEPRI_RTQUEUES) {
7041 		return SCHED(processor_queue_remove)(processor, thread);
7042 	}
7043 
7044 	processor_set_t pset = processor->processor_set;
7045 
7046 	pset_lock(pset);
7047 
7048 	/*
7049 	 * Must re-read the thread runq after acquiring the pset lock, in
7050 	 * case another core swooped in before us to dequeue the thread.
7051 	 */
7052 	if (thread_get_runq_locked(thread) != PROCESSOR_NULL) {
7053 		/*
7054 		 *	Thread is on the RT run queue and we have a lock on
7055 		 *	that run queue.
7056 		 */
7057 		rt_runq_remove(SCHED(rt_runq)(pset), thread);
7058 		pset_update_rt_stealable_state(pset);
7059 
7060 		removed = TRUE;
7061 	}
7062 
7063 	pset_unlock(pset);
7064 
7065 	return removed;
7066 }
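
/*
 * Illustrative sketch (not compiled): thread_run_queue_remove() re-reads
 * the thread's runq after taking the pset lock because another core may
 * dequeue the thread between the unlocked read and lock acquisition. A
 * self-contained model of that revalidate-under-lock pattern, using a
 * pthread mutex in place of the pset lock.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct sketch_queue {
	pthread_mutex_t lock;
};

struct sketch_item {
	struct sketch_queue *owner;     /* NULL once dequeued */
};

static bool
sketch_remove(struct sketch_item *it)
{
	struct sketch_queue *q = it->owner;     /* unlocked snapshot */
	if (q == NULL) {
		return false;                   /* already off the queue */
	}
	pthread_mutex_lock(&q->lock);
	bool removed = (it->owner == q);        /* revalidate under the lock */
	if (removed) {
		it->owner = NULL;               /* perform the dequeue */
	}
	pthread_mutex_unlock(&q->lock);
	return removed;
}
#endif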
7067 
7068 /*
7069  * Put the thread back where it goes after a thread_run_queue_remove
7070  *
7071  * Thread must have been removed under the same thread lock hold
7072  *
7073  * thread locked, at splsched
7074  */
7075 void
7076 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
7077 {
7078 	thread_assert_runq_null(thread);
7079 	assert(thread->state & (TH_RUN));
7080 
7081 	thread_setrun(thread, options);
7082 }
7083 
7084 void
7085 sys_override_cpu_throttle(boolean_t enable_override)
7086 {
7087 	if (enable_override) {
7088 		cpu_throttle_enabled = 0;
7089 	} else {
7090 		cpu_throttle_enabled = 1;
7091 	}
7092 }
7093 
7094 thread_urgency_t
7095 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
7096 {
7097 	uint64_t urgency_param1 = 0, urgency_param2 = 0;
7098 	task_t task = get_threadtask_early(thread);
7099 
7100 	thread_urgency_t urgency;
7101 
7102 	if (thread == NULL || task == TASK_NULL || (thread->state & TH_IDLE)) {
7103 		urgency_param1 = 0;
7104 		urgency_param2 = 0;
7105 
7106 		urgency = THREAD_URGENCY_NONE;
7107 	} else if (thread->sched_mode == TH_MODE_REALTIME) {
7108 		urgency_param1 = thread->realtime.period;
7109 		urgency_param2 = thread->realtime.deadline;
7110 
7111 		urgency = THREAD_URGENCY_REAL_TIME;
7112 	} else if (cpu_throttle_enabled &&
7113 	    (thread->sched_pri <= MAXPRI_THROTTLE) &&
7114 	    (thread->base_pri <= MAXPRI_THROTTLE)) {
7115 		/*
7116 		 * Threads that are running at low priority but are not
7117 		 * tagged with a specific QoS are separated out from
7118 		 * the "background" urgency. Performance management
7119 		 * subsystem can decide to either treat these threads
7120 		 * as normal threads or look at other signals like thermal
7121 		 * levels for optimal power/perf tradeoffs for a platform.
7122 		 */
7123 		boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
7124 		boolean_t task_is_suppressed = (proc_get_effective_task_policy(task, TASK_POLICY_SUP_ACTIVE) == 0x1);
7125 
7126 		/*
7127 		 * Background urgency is applied when the thread's priority is
7128 		 * MAXPRI_THROTTLE or lower, the thread is not promoted, and
7129 		 * the thread has a QoS specified.
7130 		 */
7131 		urgency_param1 = thread->sched_pri;
7132 		urgency_param2 = thread->base_pri;
7133 
7134 		if (thread_lacks_qos && !task_is_suppressed) {
7135 			urgency = THREAD_URGENCY_LOWPRI;
7136 		} else {
7137 			urgency = THREAD_URGENCY_BACKGROUND;
7138 		}
7139 	} else {
7140 		/* For otherwise unclassified threads, report throughput QoS parameters */
7141 		urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
7142 		urgency_param2 = proc_get_effective_task_policy(task, TASK_POLICY_THROUGH_QOS);
7143 		urgency = THREAD_URGENCY_NORMAL;
7144 	}
7145 
7146 	if (arg1 != NULL) {
7147 		*arg1 = urgency_param1;
7148 	}
7149 	if (arg2 != NULL) {
7150 		*arg2 = urgency_param2;
7151 	}
7152 
7153 	return urgency;
7154 }
7155 
7156 perfcontrol_class_t
7157 thread_get_perfcontrol_class(thread_t thread)
7158 {
7159 	/* Special case handling */
7160 	if (thread->state & TH_IDLE) {
7161 		return PERFCONTROL_CLASS_IDLE;
7162 	}
7163 
7164 	if (thread->sched_mode == TH_MODE_REALTIME) {
7165 		return PERFCONTROL_CLASS_REALTIME;
7166 	}
7167 
7168 	/* perfcontrol_class based on base_pri */
7169 	if (thread->base_pri <= MAXPRI_THROTTLE) {
7170 		return PERFCONTROL_CLASS_BACKGROUND;
7171 	} else if (thread->base_pri <= BASEPRI_UTILITY) {
7172 		return PERFCONTROL_CLASS_UTILITY;
7173 	} else if (thread->base_pri <= BASEPRI_DEFAULT) {
7174 		return PERFCONTROL_CLASS_NONUI;
7175 	} else if (thread->base_pri <= BASEPRI_USER_INITIATED) {
7176 		return PERFCONTROL_CLASS_USER_INITIATED;
7177 	} else if (thread->base_pri <= BASEPRI_FOREGROUND) {
7178 		return PERFCONTROL_CLASS_UI;
7179 	} else {
7180 		if (get_threadtask(thread) == kernel_task) {
7181 			/*
7182 			 * Classify Above UI kernel threads as PERFCONTROL_CLASS_KERNEL.
7183 			 * All other lower priority kernel threads should be treated
7184 			 * as regular threads for performance control purposes.
7185 			 */
7186 			return PERFCONTROL_CLASS_KERNEL;
7187 		}
7188 		return PERFCONTROL_CLASS_ABOVEUI;
7189 	}
7190 }
7191 
7192 /*
7193  *	This is the processor idle loop, which just looks for other threads
7194  *	to execute.  Processor idle threads invoke this without supplying a
7195  *	current thread, in order to idle without an asserted wait state.
7196  *
7197  *	Returns the next thread to execute if dispatched directly.
7198  */
7199 
7200 #if 0
7201 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
7202 #else
7203 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
7204 #endif
7205 
7206 #if (DEVELOPMENT || DEBUG)
7207 int sched_idle_delay_cpuid = -1;
7208 #endif
7209 
7210 thread_t
7211 processor_idle(
7212 	thread_t                        thread,
7213 	processor_t                     processor)
7214 {
7215 	processor_set_t         pset = processor->processor_set;
7216 	struct recount_snap snap = { 0 };
7217 
7218 	(void)splsched();
7219 
7220 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7221 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
7222 	    (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
7223 
7224 	SCHED_STATS_INC(idle_transitions);
7225 	assert(processor->running_timers_active == false);
7226 
7227 	recount_snapshot(&snap);
7228 	recount_processor_idle(&processor->pr_recount, &snap);
7229 
7230 	while (1) {
7231 		/*
7232 		 * Ensure that updates to my processor and pset state,
7233 		 * made by the IPI source processor before sending the IPI,
7234 		 * are visible on this processor now (even though we don't
7235 		 * take the pset lock yet).
7236 		 */
7237 		atomic_thread_fence(memory_order_acquire);
7238 
7239 		if (processor->state != PROCESSOR_IDLE) {
7240 			break;
7241 		}
7242 		if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
7243 			break;
7244 		}
7245 #if defined(CONFIG_SCHED_DEFERRED_AST)
7246 		if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
7247 			break;
7248 		}
7249 #endif
7250 		if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
7251 			break;
7252 		}
7253 
7254 		if (
7255 			processor->is_recommended
7256 #if CONFIG_SCHED_SMT
7257 			&& (processor->processor_primary == processor)
7258 #endif /* CONFIG_SCHED_SMT */
7259 			) {
7260 			if (rt_runq_count(pset)) {
7261 				break;
7262 			}
7263 		} else {
7264 			if (SCHED(processor_bound_count)(processor)) {
7265 				break;
7266 			}
7267 		}
7268 
7269 		IDLE_KERNEL_DEBUG_CONSTANT(
7270 			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
7271 
7272 		machine_track_platform_idle(TRUE);
7273 
7274 		machine_idle();
7275 		/* returns with interrupts enabled */
7276 
7277 		machine_track_platform_idle(FALSE);
7278 
7279 #if (DEVELOPMENT || DEBUG)
7280 		if (processor->cpu_id == sched_idle_delay_cpuid) {
7281 			delay(500);
7282 		}
7283 #endif
7284 
7285 		(void)splsched();
7286 
7287 		atomic_thread_fence(memory_order_acquire);
7288 
7289 		IDLE_KERNEL_DEBUG_CONSTANT(
7290 			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
7291 
7292 		/*
7293 		 * Check if we should call sched_timeshare_consider_maintenance() here.
7294 		 * The CPU was woken out of idle due to an interrupt and we should do the
7295 		 * call only if the processor is still idle. If the processor is non-idle,
7296 		 * the threads running on the processor would do the call as part of
7297 		 * context switching.
7298 		 */
7299 		if (processor->state == PROCESSOR_IDLE) {
7300 			sched_timeshare_consider_maintenance(mach_absolute_time(), true);
7301 		}
7302 
7303 		if (!SCHED(processor_queue_empty)(processor)) {
7304 #if CONFIG_SCHED_SMT
7305 			/* Secondary SMT processors respond to directed wakeups
7306 			 * exclusively. Some platforms induce 'spurious' SMT wakeups.
7307 			 */
7308 			if (processor->processor_primary == processor) {
7309 				break;
7310 			}
7311 #else /* CONFIG_SCHED_SMT*/
7312 			break;
7313 #endif /* CONFIG_SCHED_SMT*/
7314 		}
7315 	}
7316 
7317 	recount_snapshot(&snap);
7318 	recount_processor_run(&processor->pr_recount, &snap);
7319 	smr_cpu_join(processor, snap.rsn_time_mach);
7320 
7321 	ast_t reason = AST_NONE;
7322 
7323 	/* We're handling all scheduling AST's */
7324 	ast_off(AST_SCHEDULING);
7325 
7326 	/*
7327 	 * thread_select will move the processor from dispatching to running,
7328 	 * or put it in idle if there's nothing to do.
7329 	 */
7330 	thread_t cur_thread = current_thread();
7331 
7332 	thread_lock(cur_thread);
7333 	thread_t new_thread = thread_select(cur_thread, processor, &reason);
7334 	thread_unlock(cur_thread);
7335 
7336 	assert(processor->running_timers_active == false);
7337 
7338 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
7339 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
7340 	    (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
7341 
7342 	return new_thread;
7343 }
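
/*
 * Illustrative sketch (not compiled): the idle loop above uses acquire
 * fences so that pset/processor state published by the IPI sender is
 * visible without taking the pset lock, re-checking wakeup conditions
 * before and after each machine_idle(). A reduced model with C11 atomics;
 * sketch_cpu_idle() is a hypothetical stand-in for machine_idle().
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

extern void sketch_cpu_idle(void);

static void
sketch_idle_loop(_Atomic bool *work_pending)
{
	for (;;) {
		/* pairs with the release store performed by the waking CPU */
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(work_pending, memory_order_relaxed)) {
			break;          /* something to run; leave idle */
		}
		sketch_cpu_idle();      /* sleep until the next interrupt/IPI */
	}
}
#endif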
7344 
7345 /*
7346  *	Each processor has a dedicated thread which
7347  *	executes the idle loop when there is no suitable
7348  *	previous context.
7349  *
7350  *	This continuation is entered with interrupts disabled.
7351  */
7352 void
7353 idle_thread(__assert_only void* parameter,
7354     __unused wait_result_t result)
7355 {
7356 	assert(ml_get_interrupts_enabled() == FALSE);
7357 	assert(parameter == NULL);
7358 
7359 	processor_t processor = current_processor();
7360 
7361 	smr_cpu_leave(processor, processor->last_dispatch);
7362 
7363 	/*
7364 	 * Ensure that anything running in idle context triggers
7365 	 * preemption-disabled checks.
7366 	 */
7367 	disable_preemption_without_measurements();
7368 
7369 	/*
7370 	 * Enable interrupts temporarily to handle any pending interrupts
7371 	 * or IPIs before deciding to sleep
7372 	 */
7373 	spllo();
7374 
7375 	thread_t new_thread = processor_idle(THREAD_NULL, processor);
7376 	/* returns with interrupts disabled */
7377 
7378 	enable_preemption();
7379 
7380 	if (new_thread != THREAD_NULL) {
7381 		thread_run(processor->idle_thread,
7382 		    idle_thread, NULL, new_thread);
7383 		/*NOTREACHED*/
7384 	}
7385 
7386 	thread_block(idle_thread);
7387 	/*NOTREACHED*/
7388 }
7389 
7390 void
7391 idle_thread_create(
7392 	processor_t             processor,
7393 	thread_continue_t       continuation)
7394 {
7395 	kern_return_t   result;
7396 	thread_t                thread;
7397 	spl_t                   s;
7398 	char                    name[MAXTHREADNAMESIZE];
7399 
7400 	result = kernel_thread_create(continuation, NULL, MAXPRI_KERNEL, &thread);
7401 	if (result != KERN_SUCCESS) {
7402 		panic("idle_thread_create failed: %d", result);
7403 	}
7404 
7405 	snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
7406 	thread_set_thread_name(thread, name);
7407 
7408 	s = splsched();
7409 	thread_lock(thread);
7410 	thread->bound_processor = processor;
7411 	thread->chosen_processor = processor;
7412 	processor->idle_thread = thread;
7413 	thread->sched_pri = thread->base_pri = IDLEPRI;
7414 	thread->state = (TH_RUN | TH_IDLE);
7415 	thread->options |= TH_OPT_IDLE_THREAD;
7416 	thread->last_made_runnable_time = thread->last_basepri_change_time = mach_absolute_time();
7417 	thread_unlock(thread);
7418 	splx(s);
7419 
7420 	thread_deallocate(thread);
7421 }
7422 
7423 /*
7424  * sched_startup:
7425  *
7426  * Kicks off scheduler services.
7427  *
7428  * Called at splsched.
7429  */
7430 void
7431 sched_startup(void)
7432 {
7433 	kern_return_t   result;
7434 	thread_t                thread;
7435 
7436 	simple_lock_init(&sched_vm_group_list_lock, 0);
7437 
7438 	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
7439 	    NULL, MAXPRI_KERNEL, &thread);
7440 	if (result != KERN_SUCCESS) {
7441 		panic("sched_startup");
7442 	}
7443 
7444 	thread_deallocate(thread);
7445 
7446 	assert_thread_magic(thread);
7447 
7448 	/*
7449 	 * Yield to the sched_init_thread once, to
7450 	 * initialize our own thread after being switched
7451 	 * back to.
7452 	 *
7453 	 * The current thread is the only other thread
7454 	 * active at this point.
7455 	 */
7456 	thread_block(THREAD_CONTINUE_NULL);
7457 
7458 	assert_thread_magic(thread);
7459 }
7460 
7461 #if __arm64__
7462 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
7463 #endif /* __arm64__ */
7464 
7465 
7466 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
7467 
7468 static volatile uint64_t                sched_maintenance_deadline;
7469 static uint64_t                         sched_tick_last_abstime;
7470 static uint64_t                         sched_tick_delta;
7471 uint64_t                                sched_tick_max_delta;
7472 
7473 
7474 /*
7475  *	sched_init_thread:
7476  *
7477  *	Perform periodic bookkeeping functions about ten
7478  *	times per second.
7479  */
7480 void
7481 sched_timeshare_maintenance_continue(void)
7482 {
7483 	uint64_t        sched_tick_ctime, late_time;
7484 
7485 	struct sched_update_scan_context scan_context = {
7486 		.earliest_bg_make_runnable_time = UINT64_MAX,
7487 		.earliest_normal_make_runnable_time = UINT64_MAX,
7488 		.earliest_rt_make_runnable_time = UINT64_MAX
7489 	};
7490 
7491 	sched_tick_ctime = mach_absolute_time();
7492 
7493 	if (__improbable(sched_tick_last_abstime == 0)) {
7494 		sched_tick_last_abstime = sched_tick_ctime;
7495 		late_time = 0;
7496 		sched_tick_delta = 1;
7497 	} else {
7498 		late_time = sched_tick_ctime - sched_tick_last_abstime;
7499 		sched_tick_delta = late_time / sched_tick_interval;
7500 		/* Ensure a delta of 1, since the interval could be slightly
7501 		 * smaller than the sched_tick_interval due to dispatch
7502 		 * latencies.
7503 		 */
7504 		sched_tick_delta = MAX(sched_tick_delta, 1);
7505 
7506 		/* In the event that interrupt latencies or platform
7507 		 * idle events that advanced the timebase resulted
7508 		 * in periods where no threads were dispatched,
7509 		 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
7510 		 * iterations.
7511 		 */
7512 		sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
7513 
7514 		sched_tick_last_abstime = sched_tick_ctime;
7515 		sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
7516 	}
7517 
7518 	scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
7519 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
7520 	    sched_tick_delta, late_time, 0, 0, 0);
7521 
7522 	/* Add a number of pseudo-ticks corresponding to the elapsed interval.
7523 	 * This could be greater than 1 if substantial intervals during which
7524 	 * all processors are idle occur, which is rare in practice.
7525 	 */
7526 
7527 	sched_tick += sched_tick_delta;
7528 
7529 	update_vm_info();
7530 
7531 	/*
7532 	 *  Compute various averages.
7533 	 */
7534 	compute_averages(sched_tick_delta);
7535 
7536 	/*
7537 	 *  Scan the run queues for threads which
7538 	 *  may need to be updated, and find the earliest runnable thread on the runqueue
7539 	 *  to report its latency.
7540 	 */
7541 	SCHED(thread_update_scan)(&scan_context);
7542 
7543 	SCHED(rt_runq_scan)(&scan_context);
7544 
7545 	uint64_t ctime = mach_absolute_time();
7546 
7547 	uint64_t bg_max_latency       = (ctime > scan_context.earliest_bg_make_runnable_time) ?
7548 	    ctime - scan_context.earliest_bg_make_runnable_time : 0;
7549 
7550 	uint64_t default_max_latency  = (ctime > scan_context.earliest_normal_make_runnable_time) ?
7551 	    ctime - scan_context.earliest_normal_make_runnable_time : 0;
7552 
7553 	uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
7554 	    ctime - scan_context.earliest_rt_make_runnable_time : 0;
7555 
7556 	machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
7557 
7558 	/*
7559 	 * Check to see if the special sched VM group needs attention.
7560 	 */
7561 	sched_vm_group_maintenance();
7562 
7563 #if __arm64__
7564 	/* Check to see if the recommended cores failsafe is active */
7565 	sched_recommended_cores_maintenance();
7566 #endif /* __arm64__ */
7567 
7568 
7569 #if DEBUG || DEVELOPMENT
7570 #if __x86_64__
7571 #include <i386/misc_protos.h>
7572 	/* Check for long-duration interrupts */
7573 	mp_interrupt_watchdog();
7574 #endif /* __x86_64__ */
7575 #endif /* DEBUG || DEVELOPMENT */
7576 
7577 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
7578 	    sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
7579 	    sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
7580 
7581 	assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
7582 	thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
7583 	/*NOTREACHED*/
7584 }
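
/*
 * Illustrative sketch (not compiled): the tick bookkeeping above reduces to
 * clamping (late_time / sched_tick_interval) into the range
 * [1, SCHED_TICK_MAX_DELTA]. SK_TICK_MAX_DELTA is a stand-in value, not the
 * kernel's constant.
 */
#if 0
#include <stdint.h>

#define SK_TICK_MAX_DELTA 8

static uint64_t
sketch_tick_delta(uint64_t now, uint64_t last, uint64_t interval)
{
	uint64_t delta = (now - last) / interval;
	if (delta < 1) {
		delta = 1;              /* dispatch latency can shorten the period */
	}
	if (delta > SK_TICK_MAX_DELTA) {
		delta = SK_TICK_MAX_DELTA;      /* cap after long fully-idle spans */
	}
	return delta;   /* e.g. interval 10ms, 35ms elapsed => 3 pseudo-ticks */
}
#endif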
7585 
7586 static uint64_t sched_maintenance_wakeups;
7587 
7588 /*
7589  * Determine if the set of routines formerly driven by a maintenance timer
7590  * must be invoked, based on a deadline comparison. Signals the scheduler
7591  * maintenance thread on deadline expiration. Must be invoked at an interval
7592  * lower than the "sched_tick_interval", currently accomplished by
7593  * invocation via the quantum expiration timer and at context switch time.
7594  * Performance matters: this routine reuses a timestamp approximating the
7595  * current absolute time received from the caller, and should perform
7596  * no more than a comparison against the deadline in the common case.
7597  */
7598 void
7599 sched_timeshare_consider_maintenance(uint64_t ctime, bool safe_point)
7600 {
7601 	uint64_t deadline = sched_maintenance_deadline;
7602 
7603 	if (__improbable(ctime >= deadline)) {
7604 		if (__improbable(current_thread() == sched_maintenance_thread)) {
7605 			return;
7606 		}
7607 		OSMemoryBarrier();
7608 
7609 		uint64_t ndeadline = ctime + sched_tick_interval;
7610 
7611 		if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
7612 			thread_wakeup((event_t)sched_timeshare_maintenance_continue);
7613 			sched_maintenance_wakeups++;
7614 			smr_maintenance(ctime);
7615 		}
7616 	}
7617 
7618 	smr_cpu_tick(ctime, safe_point);
7619 
7620 #if !CONFIG_SCHED_CLUTCH
7621 	/*
7622 	 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
7623 	 * scheduler, the load is maintained at the thread group and bucket level.
7624 	 */
7625 	uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
7626 
7627 	if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
7628 		uint64_t new_deadline = 0;
7629 		if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
7630 			compute_sched_load();
7631 			new_deadline = ctime + sched_load_compute_interval_abs;
7632 			os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
7633 		}
7634 	}
7635 #endif /* CONFIG_SCHED_CLUTCH */
7636 
7637 #if __arm64__
7638 	uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
7639 
7640 	if (__improbable(perf_deadline && ctime >= perf_deadline)) {
7641 		/* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
7642 		if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
7643 			machine_perfcontrol_deadline_passed(perf_deadline);
7644 		}
7645 	}
7646 #endif /* __arm64__ */
7647 }
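
/*
 * Illustrative sketch (not compiled): the three deadline checks above share
 * one lock-free shape: compare a cached deadline against the current time,
 * and only the caller that wins the compare-and-swap performs the work and
 * re-arms. A zero deadline means disarmed.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
sketch_deadline_fire(_Atomic uint64_t *deadline, uint64_t now, uint64_t interval)
{
	uint64_t expected = atomic_load_explicit(deadline, memory_order_relaxed);
	if (expected == 0 || now < expected) {
		return false;   /* disarmed or not yet due: the common case */
	}
	/* exactly one racing caller swaps in the new deadline */
	return atomic_compare_exchange_strong(deadline, &expected, now + interval);
}
#endif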
7648 
7649 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
7650 
7651 void
7652 sched_init_thread(void)
7653 {
7654 	thread_block(THREAD_CONTINUE_NULL);
7655 
7656 	thread_t thread = current_thread();
7657 
7658 	thread_set_thread_name(thread, "sched_maintenance_thread");
7659 
7660 	sched_maintenance_thread = thread;
7661 
7662 	SCHED(maintenance_continuation)();
7663 
7664 	/*NOTREACHED*/
7665 }
7666 
7667 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
7668 
7669 /*
7670  *	thread_update_scan / runq_scan:
7671  *
7672  *	Scan the run queues to account for timesharing threads
7673  *	which need to be updated.
7674  *
7675  *	Scanner runs in two passes.  Pass one squirrels likely
7676  *	threads away in an array, pass two does the update.
7677  *
7678  *	This is necessary because the run queue is locked for
7679  *	the candidate scan, but the thread is locked for the update.
7680  *
7681  *	Array should be sized to make forward progress, without
7682  *	disabling preemption for long periods.
7683  */
7684 
7685 #define THREAD_UPDATE_SIZE              128
7686 
7687 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
7688 static uint32_t thread_update_count = 0;
7689 
7690 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
7691 boolean_t
7692 thread_update_add_thread(thread_t thread)
7693 {
7694 	if (thread_update_count == THREAD_UPDATE_SIZE) {
7695 		return FALSE;
7696 	}
7697 
7698 	thread_update_array[thread_update_count++] = thread;
7699 	thread_reference(thread);
7700 	return TRUE;
7701 }
7702 
7703 /* Returns whether the kernel should report that a thread triggered the fail-safe. */
7704 static bool
7705 thread_should_report_failsafe(thread_t thread)
7706 {
7707 	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) && !(thread->sched_flags & TH_SFLAG_FAILSAFE_REPORTED)) {
7708 		/* disarm the trigger for subsequent invocations */
7709 		thread->sched_flags |= TH_SFLAG_FAILSAFE_REPORTED;
7710 		return true;
7711 	}
7712 	return false;
7713 }
7714 
7715 void
7716 thread_update_process_threads(void)
7717 {
7718 	assert(thread_update_count <= THREAD_UPDATE_SIZE);
7719 
7720 	for (uint32_t i = 0; i < thread_update_count; i++) {
7721 		thread_t thread = thread_update_array[i];
7722 		assert_thread_magic(thread);
7723 		thread_update_array[i] = THREAD_NULL;
7724 
7725 		spl_t s = splsched();
7726 		thread_lock(thread);
7727 
7728 		const bool should_report_failsafe = thread_should_report_failsafe(thread);
7729 		const sched_mode_t saved_mode = thread->saved_mode; // if reporting
7730 
7731 		if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
7732 			SCHED(update_priority)(thread);
7733 		}
7734 		thread_unlock(thread);
7735 		splx(s);
7736 
7737 		/* now that interrupts are enabled, it is safe to report fail-safe triggers */
7738 		if (should_report_failsafe) {
7739 			assert((saved_mode & TH_MODE_REALTIME) || (saved_mode & TH_MODE_FIXED));
7740 			uint64_t th_id = thread->thread_id;
7741 			char th_name[MAXTHREADNAMESIZE] = "unknown";
7742 			if (thread_has_thread_name(thread)) {
7743 				thread_get_thread_name(thread, th_name);
7744 			}
7745 			task_t task = get_threadtask(thread);
7746 			assert(task != NULL);
7747 			const char* t_name = task_best_name(task);
7748 			pid_t t_pid = task_pid(task);
7749 			const int quanta = (saved_mode & TH_MODE_REALTIME) ? max_unsafe_rt_quanta : max_unsafe_fixed_quanta;
7750 			const char* mode = (saved_mode & TH_MODE_REALTIME) ? "realtime" : "fixed";
7751 			os_log_error(OS_LOG_DEFAULT, "scheduler: thread %s [%llx] in "
7752 			    "process %s [%d] triggered fail-safe by spinning for at least %d"
7753 			    "us at %s priority\n",
7754 			    th_name,
7755 			    th_id,
7756 			    t_name,
7757 			    t_pid,
7758 			    quanta * (int) sched_get_quantum_us(),
7759 			    mode);
7760 		}
7761 
7762 		thread_deallocate(thread);
7763 	}
7764 
7765 	thread_update_count = 0;
7766 }
7767 
7768 static boolean_t
7769 runq_scan_thread(
7770 	thread_t thread,
7771 	sched_update_scan_context_t scan_context)
7772 {
7773 	assert_thread_magic(thread);
7774 
7775 	if (thread->sched_stamp != sched_tick &&
7776 	    thread->sched_mode == TH_MODE_TIMESHARE) {
7777 		if (thread_update_add_thread(thread) == FALSE) {
7778 			return TRUE;
7779 		}
7780 	}
7781 
7782 	if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
7783 		if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
7784 			scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
7785 		}
7786 	} else {
7787 		if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
7788 			scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
7789 		}
7790 	}
7791 
7792 	return FALSE;
7793 }
7794 
7795 /*
7796  *	Scan a runq for candidate threads.
7797  *
7798  *	Returns TRUE if retry is needed.
7799  */
7800 boolean_t
7801 runq_scan(
7802 	run_queue_t                   runq,
7803 	sched_update_scan_context_t   scan_context)
7804 {
7805 	int count       = runq->count;
7806 	int queue_index;
7807 
7808 	assert(count >= 0);
7809 
7810 	if (count == 0) {
7811 		return FALSE;
7812 	}
7813 
7814 	for (queue_index = bitmap_first(runq->bitmap, NRQS);
7815 	    queue_index >= 0;
7816 	    queue_index = bitmap_next(runq->bitmap, queue_index)) {
7817 		thread_t thread;
7818 		circle_queue_t queue = &runq->queues[queue_index];
7819 
7820 		cqe_foreach_element(thread, queue, runq_links) {
7821 			assert(count > 0);
7822 			if (runq_scan_thread(thread, scan_context) == TRUE) {
7823 				return TRUE;
7824 			}
7825 			count--;
7826 		}
7827 	}
7828 
7829 	return FALSE;
7830 }
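
/*
 * Illustrative sketch (not compiled): runq_scan() is pass one of the
 * two-pass update described above; candidates are collected under the
 * run-queue lock into a fixed-size batch, then updated later under the
 * thread lock. A FALSE return here maps to runq_scan()'s TRUE "retry"
 * result once the batch has been drained.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

#define SK_BATCH 128    /* cf. THREAD_UPDATE_SIZE */

struct sk_thread;
static struct sk_thread *sk_batch[SK_BATCH];
static size_t sk_batch_count;

static bool
sketch_collect(struct sk_thread *t)
{
	if (sk_batch_count == SK_BATCH) {
		return false;   /* batch full; caller must drain and rescan */
	}
	sk_batch[sk_batch_count++] = t;
	return true;
}
#endif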
7831 
7832 #if CONFIG_SCHED_CLUTCH
7833 
7834 boolean_t
7835 sched_clutch_timeshare_scan(
7836 	queue_t thread_queue,
7837 	uint16_t thread_count,
7838 	sched_update_scan_context_t scan_context)
7839 {
7840 	if (thread_count == 0) {
7841 		return FALSE;
7842 	}
7843 
7844 	thread_t thread;
7845 	qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
7846 		if (runq_scan_thread(thread, scan_context) == TRUE) {
7847 			return TRUE;
7848 		}
7849 		thread_count--;
7850 	}
7851 
7852 	assert(thread_count == 0);
7853 	return FALSE;
7854 }
7855 
7856 
7857 #endif /* CONFIG_SCHED_CLUTCH */
7858 
7859 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
7860 
7861 bool
7862 thread_is_eager_preempt(thread_t thread)
7863 {
7864 	return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
7865 }
7866 
7867 void
7868 thread_set_eager_preempt(thread_t thread)
7869 {
7870 	spl_t s = splsched();
7871 	thread_lock(thread);
7872 
7873 	assert(!thread_is_eager_preempt(thread));
7874 
7875 	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
7876 
7877 	if (thread == current_thread()) {
7878 		/* csw_check updates current_is_eagerpreempt on the processor */
7879 		ast_t ast = csw_check(thread, current_processor(), AST_NONE);
7880 
7881 		thread_unlock(thread);
7882 
7883 		if (ast != AST_NONE) {
7884 			thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
7885 		}
7886 	} else {
7887 		processor_t last_processor = thread->last_processor;
7888 
7889 		if (last_processor != PROCESSOR_NULL &&
7890 		    last_processor->state == PROCESSOR_RUNNING &&
7891 		    last_processor->active_thread == thread) {
7892 			cause_ast_check(last_processor);
7893 		}
7894 
7895 		thread_unlock(thread);
7896 	}
7897 
7898 	splx(s);
7899 }
7900 
7901 void
7902 thread_clear_eager_preempt(thread_t thread)
7903 {
7904 	spl_t s = splsched();
7905 	thread_lock(thread);
7906 
7907 	assert(thread_is_eager_preempt(thread));
7908 
7909 	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
7910 
7911 	if (thread == current_thread()) {
7912 		current_processor()->current_is_eagerpreempt = false;
7913 	}
7914 
7915 	thread_unlock(thread);
7916 	splx(s);
7917 }
7918 
7919 /*
7920  * Scheduling statistics
7921  */
7922 void
7923 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
7924 {
7925 	struct sched_statistics *stats;
7926 	boolean_t to_realtime = FALSE;
7927 
7928 	stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
7929 	stats->csw_count++;
7930 
7931 	if (otherpri >= BASEPRI_REALTIME) {
7932 		stats->rt_sched_count++;
7933 		to_realtime = TRUE;
7934 	}
7935 
7936 	if ((reasons & AST_PREEMPT) != 0) {
7937 		stats->preempt_count++;
7938 
7939 		if (selfpri >= BASEPRI_REALTIME) {
7940 			stats->preempted_rt_count++;
7941 		}
7942 
7943 		if (to_realtime) {
7944 			stats->preempted_by_rt_count++;
7945 		}
7946 	}
7947 }
7948 
7949 void
7950 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
7951 {
7952 	uint64_t timestamp = mach_absolute_time();
7953 
7954 	stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
7955 	stats->last_change_timestamp = timestamp;
7956 }
7957 
7958 /*
7959  *     For calls from assembly code
7960  */
7961 #undef thread_wakeup
7962 void
7963 thread_wakeup(
7964 	event_t         x);
7965 
7966 void
7967 thread_wakeup(
7968 	event_t         x)
7969 {
7970 	thread_wakeup_with_result(x, THREAD_AWAKENED);
7971 }
7972 
7973 boolean_t
7974 preemption_enabled(void)
7975 {
7976 	return get_preemption_level() == 0 && ml_get_interrupts_enabled();
7977 }
7978 
7979 static void
7980 sched_timer_deadline_tracking_init(void)
7981 {
7982 	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
7983 	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
7984 }
7985 
7986 /*
7987  * Check that all CPUs are successfully powered up in places where that's expected.
7988  */
7989 static void
7990 check_all_cpus_are_done_starting(processor_start_kind_t start_kind)
7991 {
7992 	/*
7993 	 * `processor_count` may include registered CPUs above cpus= or cpumask= limit.
7994 	 * Use machine_info.logical_cpu_max for the CPU IDs that matter.
7995 	 */
7996 	for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
7997 		processor_t processor = processor_array[cpu_id];
7998 		processor_wait_for_start(processor, start_kind);
7999 	}
8000 }
8001 
8002 /*
8003  * Find some available online CPU that threads can be enqueued on
8004  *
8005  * Called with the sched_available_cores_lock held
8006  */
8007 static int
8008 sched_last_resort_cpu(void)
8009 {
8010 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8011 
8012 	int last_resort_cpu = lsb_first(pcs.pcs_effective.pcs_online_cores);
8013 
8014 	if (last_resort_cpu == -1) {
8015 		panic("no last resort cpu found!");
8016 	}
8017 
8018 	return last_resort_cpu;
8019 }
8020 
8021 
8022 static void
8023 assert_no_processors_in_transition_locked()
8024 {
8025 	assert(pcs.pcs_in_kernel_sleep == false);
8026 
8027 	/* All processors must be either running or offline */
8028 	assert(pcs.pcs_managed_cores ==
8029 	    (processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING] |
8030 	    processor_offline_state_map[PROCESSOR_OFFLINE_FULLY_OFFLINE]));
8031 
8032 	/* All state transitions must be quiesced at this point */
8033 	assert(pcs.pcs_effective.pcs_online_cores ==
8034 	    processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING]);
8035 }
8036 
8037 static struct powered_cores_state
8038 sched_compute_requested_powered_cores()
8039 {
8040 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8041 
8042 	struct powered_cores_state output = {
8043 		.pcs_online_cores = pcs.pcs_managed_cores,
8044 		.pcs_powerdown_recommended_cores = pcs.pcs_managed_cores,
8045 		.pcs_tempdown_cores = 0,
8046 	};
8047 
8048 	if (!pcs.pcs_init_completed) {
8049 		return output;
8050 	}
8051 
8052 	/*
8053 	 * If we unify this with derecommendation, note that only sleep should stop
8054 	 * derecommendation, not dtrace et al.
8055 	 */
8056 	if (pcs.pcs_powerdown_suspend_count) {
8057 		return output;
8058 	} else {
8059 		/*
8060 		 * The cores that power clients like ANE require to stay
8061 		 * online, or that the kernel cannot offline.
8062 		 */
8063 		cpumap_t system_required_powered_cores = pcs.pcs_required_online_pmgr |
8064 		    pcs.pcs_required_online_system;
8065 
8066 		cpumap_t online_cores_goal;
8067 
8068 		if (pcs.pcs_user_online_core_control) {
8069 			/* This is our new goal state for powered cores */
8070 			output.pcs_powerdown_recommended_cores = pcs.pcs_requested_online_user;
8071 			online_cores_goal = pcs.pcs_requested_online_user | system_required_powered_cores;
8072 		} else {
8073 			/* Remove the cores CLPC wants to power down */
8074 			cpumap_t clpc_wanted_powered_cores = pcs.pcs_managed_cores;
8075 			clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_user;
8076 			clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_system;
8077 
8078 			output.pcs_powerdown_recommended_cores = clpc_wanted_powered_cores;
8079 			online_cores_goal = clpc_wanted_powered_cores | system_required_powered_cores;
8080 
8081 			/* Any managed cores not in the wanted-powered set become temporarily powered down */
8082 			output.pcs_tempdown_cores = (pcs.pcs_managed_cores & ~clpc_wanted_powered_cores);
8083 
8084 			/* Future: Treat CLPC user/system separately. */
8085 		}
8086 
8087 		if (online_cores_goal == 0) {
8088 			/*
8089 			 * If we're somehow trying to disable all CPUs,
8090 			 * force online the lowest numbered CPU.
8091 			 */
8092 			online_cores_goal = BIT(lsb_first(pcs.pcs_managed_cores));
8093 		}
8094 
8095 #if RHODES_CLUSTER_POWERDOWN_WORKAROUND
8096 		/*
8097 		 * Because warm CPU boot from WFI is not currently implemented,
8098 		 * we cannot power down only one CPU in a cluster, so we force up
8099 		 * all the CPUs in the cluster if any one CPU is up in the cluster.
8100 		 * Once all CPUs are disabled, then the whole cluster goes down at once.
8101 		 */
8102 
8103 		cpumap_t workaround_online_cores = 0;
8104 
8105 		const ml_topology_info_t* topology = ml_get_topology_info();
8106 		for (unsigned int i = 0; i < topology->num_clusters; i++) {
8107 			ml_topology_cluster_t* cluster = &topology->clusters[i];
8108 			if ((cluster->cpu_mask & online_cores_goal) != 0) {
8109 				workaround_online_cores |= cluster->cpu_mask;
8110 			}
8111 		}
8112 
8113 		online_cores_goal = workaround_online_cores;
8114 #endif /* RHODES_CLUSTER_POWERDOWN_WORKAROUND */
8115 
8116 		output.pcs_online_cores = online_cores_goal;
8117 	}
8118 
8119 	return output;
8120 }
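
/*
 * Worked example of the composition above (masks hypothetical, assuming
 * init has completed, powerdown is not suspended, and the user is not in
 * control): with pcs_managed_cores = 0b1111,
 * pcs_requested_online_clpc_user = 0b0111,
 * pcs_requested_online_clpc_system = 0b1101, and PMGR requiring core 3
 * (system_required_powered_cores = 0b1000), the CLPC-wanted set is
 * 0b1111 & 0b0111 & 0b1101 = 0b0101. So pcs_powerdown_recommended_cores =
 * 0b0101, pcs_online_cores = 0b0101 | 0b1000 = 0b1101, and
 * pcs_tempdown_cores = 0b1111 & ~0b0101 = 0b1010.
 */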
8121 
8122 static bool
8123 sched_needs_update_requested_powered_cores()
8124 {
8125 	if (!pcs.pcs_init_completed) {
8126 		return false;
8127 	}
8128 
8129 	struct powered_cores_state requested = sched_compute_requested_powered_cores();
8130 
8131 	struct powered_cores_state effective = pcs.pcs_effective;
8132 
8133 	if (requested.pcs_powerdown_recommended_cores != effective.pcs_powerdown_recommended_cores ||
8134 	    requested.pcs_online_cores != effective.pcs_online_cores ||
8135 	    requested.pcs_tempdown_cores != effective.pcs_tempdown_cores) {
8136 		return true;
8137 	} else {
8138 		return false;
8139 	}
8140 }
8141 
8142 kern_return_t
8143 sched_processor_exit_user(processor_t processor)
8144 {
8145 	assert(processor);
8146 
8147 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8148 	assert(preemption_enabled());
8149 
8150 	kern_return_t result;
8151 
8152 	spl_t s = splsched();
8153 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8154 
8155 	if (!enable_processor_exit) {
8156 		/* This API is not supported on this device. */
8157 		result = KERN_NOT_SUPPORTED;
8158 		goto unlock;
8159 	}
8160 
8161 	if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
8162 		/* This CPU can never change state outside of sleep. */
8163 		result = KERN_NOT_SUPPORTED;
8164 		goto unlock;
8165 	}
8166 
8167 	/*
8168 	 * Future: Instead of failing, simulate the processor
8169 	 * being shut down via derecommendation and decrementing active count.
8170 	 */
8171 	if (bit_test(pcs.pcs_required_online_pmgr, processor->cpu_id)) {
8172 		/* PMGR won't let us power down this CPU right now. */
8173 		result = KERN_FAILURE;
8174 		goto unlock;
8175 	}
8176 
8177 	if (pcs.pcs_powerdown_suspend_count) {
8178 		/* A tool that disables CPU powerdown is active. */
8179 		result = KERN_FAILURE;
8180 		goto unlock;
8181 	}
8182 
8183 	if (!bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
8184 		/* The CPU is already powered off by userspace. */
8185 		result = KERN_NODE_DOWN;
8186 		goto unlock;
8187 	}
8188 
8189 	if ((pcs.pcs_recommended_cores & pcs.pcs_effective.pcs_online_cores) == BIT(processor->cpu_id)) {
8190 		/* This is the last available core, can't shut it down. */
8191 		result = KERN_RESOURCE_SHORTAGE;
8192 		goto unlock;
8193 	}
8194 
8195 	result = KERN_SUCCESS;
8196 
8197 	if (!pcs.pcs_user_online_core_control) {
8198 		pcs.pcs_user_online_core_control = true;
8199 	}
8200 
8201 	bit_clear(pcs.pcs_requested_online_user, processor->cpu_id);
8202 
8203 	if (sched_needs_update_requested_powered_cores()) {
8204 		sched_update_powered_cores_drops_lock(REASON_USER, s);
8205 	}
8206 
8207 unlock:
8208 	simple_unlock(&sched_available_cores_lock);
8209 	splx(s);
8210 
8211 	return result;
8212 }
8213 
8214 kern_return_t
8215 sched_processor_start_user(processor_t processor)
8216 {
8217 	assert(processor);
8218 
8219 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8220 	assert(preemption_enabled());
8221 
8222 	kern_return_t result;
8223 
8224 	spl_t s = splsched();
8225 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8226 
8227 	if (!enable_processor_exit) {
8228 		result = KERN_NOT_SUPPORTED;
8229 		goto unlock;
8230 	}
8231 
8232 	if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
8233 		result = KERN_NOT_SUPPORTED;
8234 		goto unlock;
8235 	}
8236 
8237 #if CONFIG_SCHED_SMT
8238 	/* Not allowed to start an SMT processor while SMT is disabled */
8239 	if ((sched_enable_smt == 0) && (processor->processor_primary != processor)) {
8240 		result = KERN_FAILURE;
8241 		goto unlock;
8242 	}
8243 #endif /* CONFIG_SCHED_SMT */
8244 
8245 	if (pcs.pcs_powerdown_suspend_count) {
8246 		result = KERN_FAILURE;
8247 		goto unlock;
8248 	}
8249 
8250 	if (bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
8251 		result = KERN_FAILURE;
8252 		goto unlock;
8253 	}
8254 
8255 	result = KERN_SUCCESS;
8256 
8257 	bit_set(pcs.pcs_requested_online_user, processor->cpu_id);
8258 
8259 	/*
8260 	 * Once the user puts all CPUs back online,
8261 	 * we can resume automatic cluster power down.
8262 	 */
8263 	if (pcs.pcs_requested_online_user == pcs.pcs_managed_cores) {
8264 		pcs.pcs_user_online_core_control = false;
8265 	}
8266 
8267 	if (sched_needs_update_requested_powered_cores()) {
8268 		sched_update_powered_cores_drops_lock(REASON_USER, s);
8269 	}
8270 
8271 unlock:
8272 	simple_unlock(&sched_available_cores_lock);
8273 	splx(s);
8274 
8275 	return result;
8276 }
8277 
8278 sched_cond_atomic_t sched_update_powered_cores_wakeup;
8279 thread_t sched_update_powered_cores_thread;
8280 
8281 
8282 static void OS_NORETURN sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused);
8283 
8284 /*
8285  * After all processors have been ml_processor_register'ed and processor_boot'ed,
8286  * the scheduler can finalize its data structures and allow CPU power state changes.
8287  *
8288  * Enforce that this only happens *once*. More than once is definitely not OK. rdar://121270513
8289  */
8290 void
8291 sched_cpu_init_completed(void)
8292 {
8293 	static bool sched_cpu_init_completed_called = false;
8294 
8295 	if (!os_atomic_cmpxchg(&sched_cpu_init_completed_called, false, true, relaxed)) {
8296 		panic("sched_cpu_init_completed called twice! %d", sched_cpu_init_completed_called);
8297 	}
8298 
8299 	if (SCHED(cpu_init_completed) != NULL) {
8300 		SCHED(cpu_init_completed)();
8301 	}
8302 
8303 	/* Wait for any CPUs that are still starting, and enforce that they eventually complete. */
8304 	check_all_cpus_are_done_starting(PROCESSOR_FIRST_BOOT);
8305 
8306 	lck_mtx_lock(&cluster_powerdown_lock);
8307 
8308 	assert(sched_update_powered_cores_thread == THREAD_NULL);
8309 
8310 	sched_cond_init(&sched_update_powered_cores_wakeup);
8311 
8312 	kern_return_t result = kernel_thread_start_priority(
8313 		sched_update_powered_cores_continue,
8314 		NULL, MAXPRI_KERNEL, &sched_update_powered_cores_thread);
8315 	if (result != KERN_SUCCESS) {
8316 		panic("failed to create sched_update_powered_cores thread");
8317 	}
8318 
8319 	thread_set_thread_name(sched_update_powered_cores_thread,
8320 	    "sched_update_powered_cores");
8321 
8322 	spl_t s = splsched();
8323 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8324 
8325 	assert(pcs.pcs_init_completed == false);
8326 
8327 	pcs.pcs_managed_cores = pcs.pcs_effective.pcs_online_cores;
8328 
8329 	assert(__builtin_popcountll(pcs.pcs_managed_cores) == machine_info.logical_cpu_max);
8330 
8331 	/* If CLPC tries to cluster power down before this point, it's ignored. */
8332 	pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
8333 	pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
8334 	pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
8335 
8336 	cpumap_t system_required_cores = 0;
8337 
8338 	/*
8339 	 * Ask the platform layer which CPUs are allowed to
8340 	 * be powered off outside of system sleep.
8341 	 */
8342 	for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
8343 		if (!ml_cpu_can_exit(cpu_id)) {
8344 			bit_set(system_required_cores, cpu_id);
8345 		}
8346 	}
8347 
8348 	pcs.pcs_required_online_system = system_required_cores;
8349 	pcs.pcs_effective.pcs_powerdown_recommended_cores = pcs.pcs_managed_cores;
8350 
8351 	pcs.pcs_requested = sched_compute_requested_powered_cores();
8352 
8353 	assert(pcs.pcs_requested.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
8354 	assert(pcs.pcs_requested.pcs_online_cores == pcs.pcs_managed_cores);
8355 	assert(pcs.pcs_requested.pcs_tempdown_cores == 0);
8356 
8357 	assert(pcs.pcs_effective.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
8358 	assert(pcs.pcs_effective.pcs_online_cores == pcs.pcs_managed_cores);
8359 	assert(pcs.pcs_effective.pcs_tempdown_cores == 0);
8360 
8361 	pcs.pcs_init_completed = true;
8362 
8363 	simple_unlock(&sched_available_cores_lock);
8364 	splx(s);
8365 
8366 	lck_mtx_unlock(&cluster_powerdown_lock);
8367 
8368 	/* Release the +1 pcs_powerdown_suspend_count that we booted up with. */
8369 	resume_cluster_powerdown();
8370 }
8371 
8372 bool
8373 sched_is_in_sleep(void)
8374 {
8375 	return pcs.pcs_in_kernel_sleep || pcs.pcs_wants_kernel_sleep;
8376 }
8377 
8378 bool
8379 sched_is_cpu_init_completed(void)
8380 {
8381 	return pcs.pcs_init_completed;
8382 }
8383 
8384 processor_reason_t last_sched_update_powered_cores_continue_reason;
8385 
8386 static void OS_NORETURN
8387 sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused)
8388 {
8389 	sched_cond_ack(&sched_update_powered_cores_wakeup);
8390 
8391 	while (true) {
8392 		lck_mtx_lock(&cluster_powerdown_lock);
8393 
8394 		spl_t s = splsched();
8395 		simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8396 
8397 		bool needs_update = sched_needs_update_requested_powered_cores();
8398 
8399 		if (needs_update) {
8400 			/* This thread shouldn't need to make changes while powerdown is suspended */
8401 			assert(pcs.pcs_powerdown_suspend_count == 0);
8402 
8403 			processor_reason_t reason = last_sched_update_powered_cores_continue_reason;
8404 
8405 			sched_update_powered_cores_drops_lock(reason, s);
8406 		}
8407 
8408 		simple_unlock(&sched_available_cores_lock);
8409 		splx(s);
8410 
8411 		lck_mtx_unlock(&cluster_powerdown_lock);
8412 
8413 		/* If we did an update, we dropped the lock, so check again. */
8414 
8415 		if (!needs_update) {
8416 			sched_cond_wait(&sched_update_powered_cores_wakeup, THREAD_UNINT,
8417 			    sched_update_powered_cores_continue);
8418 			/* The condition was signaled since we last blocked, check again. */
8419 		}
8420 	}
8421 }
8422 
8423 __options_decl(sched_powered_cores_flags_t, uint32_t, {
8424 	ASSERT_IN_SLEEP                 = 0x10000000,
8425 	ASSERT_POWERDOWN_SUSPENDED      = 0x20000000,
8426 	POWERED_CORES_OPTIONS_MASK      = ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED,
8427 });
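
/*
 * Worked example (hypothetical value) of the encoding consumed by
 * sched_set_powered_cores() below: bit 31 selects REASON_CLPC_USER over
 * REASON_CLPC_SYSTEM, the POWERED_CORES_OPTIONS_MASK bits carry the
 * ASSERT_* flags, and the low bits carry the requested core mask. For
 * instance, 0x80000003 requests cores {0, 1} powered with REASON_CLPC_USER
 * and no assertion flags.
 */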
8428 
8429 /*
8430  * This is KPI with CLPC.
8431  */
8432 void
8433 sched_perfcontrol_update_powered_cores(
8434 	uint64_t requested_powered_cores,
8435 	processor_reason_t reason,
8436 	__unused uint32_t flags)
8437 {
8438 	assert((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER));
8439 
8440 #if DEVELOPMENT || DEBUG
8441 	if (flags & (ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED)) {
8442 		if (flags & ASSERT_POWERDOWN_SUSPENDED) {
8443 			assert(pcs.pcs_powerdown_suspend_count > 0);
8444 		}
8445 		if (flags & ASSERT_IN_SLEEP) {
8446 			assert(pcs.pcs_sleep_override_recommended == true);
8447 		}
8448 		return;
8449 	}
8450 #endif
8451 
8452 	spl_t s = splsched();
8453 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8454 
8455 	cpumap_t requested_cores = requested_powered_cores & pcs.pcs_managed_cores;
8456 
8457 	if (reason == REASON_CLPC_SYSTEM) {
8458 		pcs.pcs_requested_online_clpc_system = requested_cores;
8459 	} else if (reason == REASON_CLPC_USER) {
8460 		pcs.pcs_requested_online_clpc_user = requested_cores;
8461 	}
8462 
8463 	bool needs_update = sched_needs_update_requested_powered_cores();
8464 
8465 	if (needs_update) {
8466 		last_sched_update_powered_cores_continue_reason = reason;
8467 	}
8468 
8469 	simple_unlock(&sched_available_cores_lock);
8470 	splx(s);
8471 
8472 	if (needs_update) {
8473 		sched_cond_signal(&sched_update_powered_cores_wakeup,
8474 		    sched_update_powered_cores_thread);
8475 	}
8476 }
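
/*
 * Usage sketch (hypothetical CLPC-side caller, not part of the build): the
 * perf controller passes the absolute set of cores it wants powered, not a
 * delta, so repeated calls with the same mask are idempotent. The mask here
 * is illustrative.
 */
#if 0
static void
clpc_request_efficiency_cores_only(void)
{
	/* Keep only cores 0-3 powered, on behalf of a user-visible policy */
	sched_perfcontrol_update_powered_cores(0x0F, REASON_CLPC_USER, 0);
}
#endif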
8477 
8478 /*
8479  * This doesn't just suspend cluster powerdown.
8480  * It also powers up all the cores and leaves them up,
8481  * even if some user wanted them down.
8482  * This is important because dtrace, monotonic, and others can't handle any
8483  * powered down cores, not just cluster powerdown.
8484  */
8485 static void
8486 suspend_cluster_powerdown_locked(bool for_sleep)
8487 {
8488 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8489 	kprintf("%s>calling sched_update_powered_cores to suspend powerdown\n", __func__);
8490 
8491 	spl_t s = splsched();
8492 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8493 
8494 	assert(pcs.pcs_powerdown_suspend_count >= 0);
8495 
8496 	if (for_sleep) {
8497 		assert(!pcs.pcs_wants_kernel_sleep);
8498 		assert(!pcs.pcs_in_kernel_sleep);
8499 		pcs.pcs_wants_kernel_sleep = true;
8500 	}
8501 
8502 	pcs.pcs_powerdown_suspend_count++;
8503 
8504 	if (sched_needs_update_requested_powered_cores()) {
8505 		sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
8506 	}
8507 
8508 	if (for_sleep) {
8509 		assert(pcs.pcs_wants_kernel_sleep);
8510 		assert(!pcs.pcs_in_kernel_sleep);
8511 		pcs.pcs_in_kernel_sleep = true;
8512 
8513 		assert(sched_needs_update_requested_powered_cores() == false);
8514 	}
8515 
8516 	simple_unlock(&sched_available_cores_lock);
8517 	splx(s);
8518 
8519 	if (pcs.pcs_init_completed) {
8520 		/* At this point, no CPU should still be starting. Let's enforce that. */
8521 		check_all_cpus_are_done_starting(for_sleep ?
8522 		    PROCESSOR_BEFORE_ENTERING_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_SUSPEND);
8523 	}
8524 }
8525 
8526 static void
8527 resume_cluster_powerdown_locked(bool for_sleep)
8528 {
8529 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8530 
8531 	if (pcs.pcs_init_completed) {
8532 		/* At this point, no CPU should still be starting. Let's enforce that. */
8533 		check_all_cpus_are_done_starting(for_sleep ?
8534 		    PROCESSOR_WAKE_FROM_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_RESUME);
8535 	}
8536 
8537 	kprintf("%s>calling sched_update_powered_cores to resume powerdown\n", __func__);
8538 
8539 	spl_t s = splsched();
8540 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8541 
8542 	if (pcs.pcs_powerdown_suspend_count <= 0) {
8543 		panic("resume_cluster_powerdown() called with pcs.pcs_powerdown_suspend_count=%d\n", pcs.pcs_powerdown_suspend_count);
8544 	}
8545 
8546 	if (for_sleep) {
8547 		assert(pcs.pcs_wants_kernel_sleep);
8548 		assert(pcs.pcs_in_kernel_sleep);
8549 		pcs.pcs_wants_kernel_sleep = false;
8550 	}
8551 
8552 	pcs.pcs_powerdown_suspend_count--;
8553 
8554 	if (pcs.pcs_powerdown_suspend_count == 0) {
8555 		/* Returning to client controlled powerdown mode */
8556 		assert(pcs.pcs_init_completed);
8557 
8558 		/* To match previous behavior, clear the user state */
8559 		pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
8560 		pcs.pcs_user_online_core_control = false;
8561 
8562 		/* To match previous behavior, clear the requested CLPC state. */
8563 		pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
8564 		pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
8565 	}
8566 
8567 	if (sched_needs_update_requested_powered_cores()) {
8568 		sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
8569 	}
8570 
8571 	if (for_sleep) {
8572 		assert(!pcs.pcs_wants_kernel_sleep);
8573 		assert(pcs.pcs_in_kernel_sleep);
8574 		pcs.pcs_in_kernel_sleep = false;
8575 
8576 		assert(sched_needs_update_requested_powered_cores() == false);
8577 	}
8578 
8579 	simple_unlock(&sched_available_cores_lock);
8580 	splx(s);
8581 }
8582 
8583 static uint64_t
8584 die_and_cluster_to_cpu_mask(
8585 	__unused unsigned int die_id,
8586 	__unused unsigned int die_cluster_id)
8587 {
8588 #if __arm__ || __arm64__
8589 	const ml_topology_info_t* topology = ml_get_topology_info();
8590 	unsigned int num_clusters = topology->num_clusters;
8591 	for (unsigned int i = 0; i < num_clusters; i++) {
8592 		ml_topology_cluster_t* cluster = &topology->clusters[i];
8593 		if ((cluster->die_id == die_id) &&
8594 		    (cluster->die_cluster_id == die_cluster_id)) {
8595 			return cluster->cpu_mask;
8596 		}
8597 	}
8598 #endif
8599 	return 0ull;
8600 }
8601 
8602 /*
8603  * Take an assertion that ensures all CPUs in the cluster are powered up until
8604  * the assertion is released.
8605  * A system suspend will still power down the CPUs.
8606  * This call will stall if system suspend is in progress.
8607  *
8608  * Future ER: Could this just power up the cluster, and leave enabling the
8609  * processors to be asynchronous, or deferred?
8610  *
8611  * Enabling the rail is synchronous, it must be powered up before returning.
8612  */
8613 void
8614 sched_enable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
8615 {
8616 	uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
8617 
8618 	lck_mtx_lock(&cluster_powerdown_lock);
8619 
8620 	/*
8621 	 * Note: if pcs.pcs_init_completed is false, because the
8622 	 * CPUs have not booted yet, then we assume that all
8623 	 * clusters are already powered up at boot (see IOCPUInitialize)
8624 	 * so we don't have to wait for cpu boot to complete.
8625 	 * We'll still save the requested assertion and enforce it after
8626 	 * boot completes.
8627 	 */
8628 
8629 	spl_t s = splsched();
8630 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8631 
8632 	if (pcs.pcs_init_completed) {
8633 		assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
8634 	}
8635 
8636 	/* Can't enable something that is already enabled */
8637 	assert((pcs.pcs_required_online_pmgr & core_mask) == 0);
8638 
8639 	pcs.pcs_required_online_pmgr |= core_mask;
8640 
8641 	if (sched_needs_update_requested_powered_cores()) {
8642 		sched_update_powered_cores_drops_lock(REASON_PMGR_SYSTEM, s);
8643 	}
8644 
8645 	simple_unlock(&sched_available_cores_lock);
8646 	splx(s);
8647 
8648 	lck_mtx_unlock(&cluster_powerdown_lock);
8649 }
8650 
8651 /*
8652  * Release the assertion ensuring the cluster is powered up.
8653  * This operation is asynchronous, so PMGR doesn't need to wait until it takes
8654  * effect. If the enable comes in before it takes effect, it'll either
8655  * wait on the lock, or the async thread will discover it needs no update.
8656  */
8657 void
8658 sched_disable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
8659 {
8660 	uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
8661 
8662 	spl_t s = splsched();
8663 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8664 
8665 	/* Can't disable something that is already disabled */
8666 	assert((pcs.pcs_required_online_pmgr & core_mask) == core_mask);
8667 
8668 	if (pcs.pcs_init_completed) {
8669 		assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
8670 	}
8671 
8672 	pcs.pcs_required_online_pmgr &= ~core_mask;
8673 
8674 	bool needs_update = sched_needs_update_requested_powered_cores();
8675 
8676 	if (needs_update) {
8677 		last_sched_update_powered_cores_continue_reason = REASON_PMGR_SYSTEM;
8678 	}
8679 
8680 	simple_unlock(&sched_available_cores_lock);
8681 	splx(s);
8682 
8683 	if (needs_update) {
8684 		sched_cond_signal(&sched_update_powered_cores_wakeup,
8685 		    sched_update_powered_cores_thread);
8686 	}
8687 }
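
/*
 * Usage sketch (hypothetical PMGR-side client, not part of the build): a
 * power client brackets its need for a powered cluster with the pair below.
 * Enable is synchronous and may block until the rail is up; disable merely
 * records the request and lets the update thread apply it asynchronously.
 */
#if 0
static void
acc_client_run_work(unsigned int die_id, unsigned int die_cluster_id)
{
	sched_enable_acc_rail(die_id, die_cluster_id);  /* returns with rail powered */
	/* ... perform work that requires the cluster rail ... */
	sched_disable_acc_rail(die_id, die_cluster_id); /* asynchronous power-down */
}
#endif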
8688 
8689 void
8690 suspend_cluster_powerdown(void)
8691 {
8692 	lck_mtx_lock(&cluster_powerdown_lock);
8693 	suspend_cluster_powerdown_locked(false);
8694 	lck_mtx_unlock(&cluster_powerdown_lock);
8695 }
8696 
8697 void
8698 resume_cluster_powerdown(void)
8699 {
8700 	lck_mtx_lock(&cluster_powerdown_lock);
8701 	resume_cluster_powerdown_locked(false);
8702 	lck_mtx_unlock(&cluster_powerdown_lock);
8703 
8704 #if CONFIG_SCHED_SMT
8705 	if (sched_enable_smt == 0) {
8706 		enable_smt_processors(false);
8707 	}
8708 #endif /* CONFIG_SCHED_SMT */
8709 }
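
/*
 * Usage sketch (hypothetical subsystem, not part of the build): tools such
 * as dtrace that cannot tolerate any powered-down core hold the suspend
 * reference for the lifetime of a session. The count nests, so independent
 * clients can overlap safely.
 */
#if 0
static void
tracing_session_begin(void)
{
	suspend_cluster_powerdown();    /* all cores stay powered until resumed */
}

static void
tracing_session_end(void)
{
	resume_cluster_powerdown();     /* drop our +1 on the suspend count */
}
#endif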
8710 
8711 
8712 LCK_MTX_DECLARE(user_cluster_powerdown_lock, &cluster_powerdown_grp);
8713 static bool user_suspended_cluster_powerdown = false;
8714 
8715 kern_return_t
8716 suspend_cluster_powerdown_from_user(void)
8717 {
8718 	kern_return_t ret = KERN_FAILURE;
8719 
8720 	lck_mtx_lock(&user_cluster_powerdown_lock);
8721 
8722 	if (!user_suspended_cluster_powerdown) {
8723 		suspend_cluster_powerdown();
8724 		user_suspended_cluster_powerdown = true;
8725 		ret = KERN_SUCCESS;
8726 	}
8727 
8728 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8729 
8730 	return ret;
8731 }
8732 
8733 kern_return_t
8734 resume_cluster_powerdown_from_user(void)
8735 {
8736 	kern_return_t ret = KERN_FAILURE;
8737 
8738 	lck_mtx_lock(&user_cluster_powerdown_lock);
8739 
8740 	if (user_suspended_cluster_powerdown) {
8741 		resume_cluster_powerdown();
8742 		user_suspended_cluster_powerdown = false;
8743 		ret = KERN_SUCCESS;
8744 	}
8745 
8746 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8747 
8748 	return ret;
8749 }
8750 
8751 int
8752 get_cluster_powerdown_user_suspended(void)
8753 {
8754 	lck_mtx_lock(&user_cluster_powerdown_lock);
8755 
8756 	int ret = (int)user_suspended_cluster_powerdown;
8757 
8758 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8759 
8760 	return ret;
8761 }
8762 
8763 #if DEVELOPMENT || DEBUG
8764 /* Functions to support the temporary sysctl */
8765 static uint64_t saved_requested_powered_cores = ALL_CORES_POWERED;
8766 void
8767 sched_set_powered_cores(int requested_powered_cores)
8768 {
8769 	processor_reason_t reason = bit_test(requested_powered_cores, 31) ? REASON_CLPC_USER : REASON_CLPC_SYSTEM;
8770 	sched_powered_cores_flags_t flags = requested_powered_cores & POWERED_CORES_OPTIONS_MASK;
8771 
8772 	saved_requested_powered_cores = requested_powered_cores;
8773 
8774 	requested_powered_cores = bits(requested_powered_cores, 28, 0);
8775 
8776 	sched_perfcontrol_update_powered_cores(requested_powered_cores, reason, flags);
8777 }
8778 int
8779 sched_get_powered_cores(void)
8780 {
8781 	return (int)saved_requested_powered_cores;
8782 }
8783 
8784 uint64_t
8785 sched_sysctl_get_recommended_cores(void)
8786 {
8787 	return pcs.pcs_recommended_cores;
8788 }
8789 #endif
8790 
8791 /*
8792  * Ensure that all cores are powered and recommended before sleep
8793  * Acquires cluster_powerdown_lock and returns with it held.
8794  */
8795 void
8796 sched_override_available_cores_for_sleep(void)
8797 {
8798 	if (!pcs.pcs_init_completed) {
8799 		panic("Attempting to sleep before all CPUs are registered");
8800 	}
8801 
8802 	lck_mtx_lock(&cluster_powerdown_lock);
8803 
8804 	spl_t s = splsched();
8805 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8806 
8807 	assert(pcs.pcs_sleep_override_recommended == false);
8808 
8809 	pcs.pcs_sleep_override_recommended = true;
8810 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8811 
8812 	simple_unlock(&sched_available_cores_lock);
8813 	splx(s);
8814 
8815 	suspend_cluster_powerdown_locked(true);
8816 }
8817 
8818 /*
8819  * Restore the previously recommended cores, but leave all cores powered
8820  * after sleep.
8821  * Called with cluster_powerdown_lock still held, releases the lock.
8822  */
8823 void
8824 sched_restore_available_cores_after_sleep(void)
8825 {
8826 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8827 
8828 	spl_t s = splsched();
8829 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8830 	assert(pcs.pcs_sleep_override_recommended == true);
8831 
8832 	pcs.pcs_sleep_override_recommended = false;
8833 	sched_update_recommended_cores_locked(REASON_NONE, 0);
8834 
8835 	simple_unlock(&sched_available_cores_lock);
8836 	splx(s);
8837 
8838 	resume_cluster_powerdown_locked(true);
8839 
8840 	lck_mtx_unlock(&cluster_powerdown_lock);
8841 
8842 #if CONFIG_SCHED_SMT
8843 	if (sched_enable_smt == 0) {
8844 		enable_smt_processors(false);
8845 	}
8846 #endif /* CONFIG_SCHED_SMT */
8847 }
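
/*
 * Usage sketch (simplified, hypothetical caller): the platform sleep path
 * brackets system sleep with this pair. The override call returns with
 * cluster_powerdown_lock held and the restore call releases it, so the two
 * must be strictly paired on the same path.
 */
#if 0
static void
platform_system_sleep(void)
{
	sched_override_available_cores_for_sleep();  /* takes cluster_powerdown_lock */
	/* ... offline secondary CPUs, enter sleep, wake ... */
	sched_restore_available_cores_after_sleep(); /* drops cluster_powerdown_lock */
}
#endif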
8848 
8849 #if __arm__ || __arm64__
8850 
8851 uint64_t    perfcontrol_failsafe_maintenance_runnable_time;
8852 uint64_t    perfcontrol_failsafe_activation_time;
8853 uint64_t    perfcontrol_failsafe_deactivation_time;
8854 
8855 /* data covering who likely caused it and how long they ran */
8856 #define FAILSAFE_NAME_LEN       33 /* (2*MAXCOMLEN)+1 from size of p_name */
8857 char        perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
8858 int         perfcontrol_failsafe_pid;
8859 uint64_t    perfcontrol_failsafe_tid;
8860 uint64_t    perfcontrol_failsafe_thread_timer_at_start;
8861 uint64_t    perfcontrol_failsafe_thread_timer_last_seen;
8862 uint64_t    perfcontrol_failsafe_recommended_at_trigger;
8863 
8864 /*
8865  * Perf controller calls here to update the recommended core bitmask.
8866  * If the failsafe is active, we don't immediately apply the new value.
8867  * Instead, we store the new request and use it after the failsafe deactivates.
8868  *
8869  * If the failsafe is not active, immediately apply the update.
8870  *
8871  * No scheduler locks are held, no other locks that the scheduler might
8872  * depend on are held, and interrupts are enabled.
8873  *
8874  * Currently the prototype is in osfmk/arm/machine_routines.h.
8875  */
8876 void
8877 sched_perfcontrol_update_recommended_cores_reason(
8878 	uint64_t                recommended_cores,
8879 	processor_reason_t      reason,
8880 	__unused uint32_t       flags)
8881 {
8882 	assert(preemption_enabled());
8883 
8884 	spl_t s = splsched();
8885 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8886 
8887 	if (reason == REASON_CLPC_SYSTEM) {
8888 		pcs.pcs_requested_recommended_clpc_system = recommended_cores;
8889 	} else {
8890 		assert(reason == REASON_CLPC_USER);
8891 		pcs.pcs_requested_recommended_clpc_user = recommended_cores;
8892 	}
8893 
8894 	pcs.pcs_requested_recommended_clpc = pcs.pcs_requested_recommended_clpc_system &
8895 	    pcs.pcs_requested_recommended_clpc_user;
8896 
8897 	sysctl_sched_recommended_cores = pcs.pcs_requested_recommended_clpc;
8898 
8899 	sched_update_recommended_cores_locked(reason, 0);
8900 
8901 	simple_unlock(&sched_available_cores_lock);
8902 	splx(s);
8903 }
8904 
8905 void
8906 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
8907 {
8908 	sched_perfcontrol_update_recommended_cores_reason(recommended_cores, REASON_CLPC_USER, 0);
8909 }
8910 
8911 /*
8912  * Consider whether we need to activate the recommended cores failsafe
8913  *
8914  * Called from quantum timer interrupt context of a realtime thread
8915  * No scheduler locks are held, interrupts are disabled
8916  */
8917 void
8918 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
8919 {
8920 	/*
8921 	 * Check whether a realtime thread is starving the system
8922 	 * and whether bringing up non-recommended cores would help
8923 	 *
8924 	 * TODO: Is this the correct check for recommended == possible cores?
8925 	 * TODO: Validate the checks without the relevant lock are OK.
8926 	 */
8927 
8928 	if (__improbable(pcs.pcs_recommended_clpc_failsafe_active)) {
8929 		/* keep track of how long the responsible thread runs */
8930 		uint64_t cur_th_time = recount_current_thread_time_mach();
8931 
8932 		simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8933 
8934 		if (pcs.pcs_recommended_clpc_failsafe_active &&
8935 		    cur_thread->thread_id == perfcontrol_failsafe_tid) {
8936 			perfcontrol_failsafe_thread_timer_last_seen = cur_th_time;
8937 		}
8938 
8939 		simple_unlock(&sched_available_cores_lock);
8940 
8941 		/* we're already trying to solve the problem, so bail */
8942 		return;
8943 	}
8944 
8945 	/* The failsafe won't help if there are no more processors to enable */
8946 	if (__probable(bit_count(pcs.pcs_requested_recommended_clpc) >= processor_count)) {
8947 		return;
8948 	}
8949 
8950 	uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
8951 
8952 	/* Use the maintenance thread as our canary in the coal mine */
8953 	thread_t m_thread = sched_maintenance_thread;
8954 
8955 	/* If it doesn't look bad, nothing to see here */
8956 	if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
8957 		return;
8958 	}
8959 
8960 	/* It looks bad, take the lock to be sure */
8961 	thread_lock(m_thread);
8962 
8963 	if (thread_get_runq(m_thread) == PROCESSOR_NULL ||
8964 	    (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
8965 	    m_thread->last_made_runnable_time >= too_long_ago) {
8966 		/*
8967 		 * Maintenance thread is either on cpu or blocked, and
8968 		 * therefore wouldn't benefit from more cores
8969 		 */
8970 		thread_unlock(m_thread);
8971 		return;
8972 	}
8973 
8974 	uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
8975 
8976 	thread_unlock(m_thread);
8977 
8978 	/*
8979 	 * There are cores disabled at perfcontrol's recommendation, but the
8980 	 * system is so overloaded that the maintenance thread can't run.
8981 	 * That likely means that perfcontrol can't run either, so it can't fix
8982 	 * the recommendation.  We have to kick in a failsafe to keep from starving.
8983 	 *
8984 	 * When the maintenance thread has been starved for too long,
8985 	 * ignore the recommendation from perfcontrol and light up all the cores.
8986 	 *
8987 	 * TODO: Consider weird states like boot, sleep, or debugger
8988 	 */
8989 
8990 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8991 
8992 	if (pcs.pcs_recommended_clpc_failsafe_active) {
8993 		simple_unlock(&sched_available_cores_lock);
8994 		return;
8995 	}
8996 
8997 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8998 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
8999 	    pcs.pcs_requested_recommended_clpc, maintenance_runnable_time, 0, 0, 0);
9000 
9001 	pcs.pcs_recommended_clpc_failsafe_active = true;
9002 	perfcontrol_failsafe_activation_time = mach_absolute_time();
9003 	perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
9004 	perfcontrol_failsafe_recommended_at_trigger = pcs.pcs_requested_recommended_clpc;
9005 
9006 	/* Capture some data about who screwed up (assuming that the thread on core is at fault) */
9007 	task_t task = get_threadtask(cur_thread);
9008 	perfcontrol_failsafe_pid = task_pid(task);
9009 	strlcpy(perfcontrol_failsafe_name, proc_name_address(get_bsdtask_info(task)), sizeof(perfcontrol_failsafe_name));
9010 
9011 	perfcontrol_failsafe_tid = cur_thread->thread_id;
9012 
9013 	/* Blame the thread for time it has run recently */
9014 	uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
9015 
9016 	uint64_t last_seen = recount_current_thread_time_mach();
9017 
9018 	/* Compute the start time of the bad behavior in terms of the thread's on core time */
9019 	perfcontrol_failsafe_thread_timer_at_start  = last_seen - recent_computation;
9020 	perfcontrol_failsafe_thread_timer_last_seen = last_seen;
9021 
9022 	/* Publish the pcs_recommended_clpc_failsafe_active override to the CPUs */
9023 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
9024 
9025 	simple_unlock(&sched_available_cores_lock);
9026 }
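
/*
 * Illustrative sketch (hypothetical value, not the tunable actually used):
 * perfcontrol_failsafe_starvation_threshold is an absolute-time interval; a
 * platform would derive it from a wall-clock bound the same way the timer
 * deadline tracking bins are derived, e.g.:
 */
#if 0
static uint64_t example_starvation_threshold;

static void
example_starvation_threshold_init(void)
{
	/* Trip the failsafe after maintenance starves for 500 ms */
	nanoseconds_to_absolutetime(500 * NSEC_PER_MSEC,
	    &example_starvation_threshold);
}
#endif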
9027 
9028 /*
9029  * Now that our bacon has been saved by the failsafe, consider whether to turn it off
9030  *
9031  * Runs in the context of the maintenance thread, no locks held
9032  */
9033 static void
9034 sched_recommended_cores_maintenance(void)
9035 {
9036 	/* Common case - no failsafe, nothing to be done here */
9037 	if (__probable(!pcs.pcs_recommended_clpc_failsafe_active)) {
9038 		return;
9039 	}
9040 
9041 	uint64_t ctime = mach_absolute_time();
9042 
9043 	boolean_t print_diagnostic = FALSE;
9044 	char p_name[FAILSAFE_NAME_LEN] = "";
9045 
9046 	spl_t s = splsched();
9047 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9048 
9049 	/* Check again, under the lock, to avoid races */
9050 	if (!pcs.pcs_recommended_clpc_failsafe_active) {
9051 		goto out;
9052 	}
9053 
9054 	/*
9055 	 * Ensure that the other cores get another few ticks to run some threads
9056 	 * If we don't have this hysteresis, the maintenance thread is the first
9057 	 * to run, and then it immediately kills the other cores
9058 	 */
9059 	if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
9060 		goto out;
9061 	}
9062 
9063 	/* Capture some diagnostic state under the lock so we can print it out later */
9064 
9065 	int      pid = perfcontrol_failsafe_pid;
9066 	uint64_t tid = perfcontrol_failsafe_tid;
9067 
9068 	uint64_t thread_usage       = perfcontrol_failsafe_thread_timer_last_seen -
9069 	    perfcontrol_failsafe_thread_timer_at_start;
9070 	uint64_t rec_cores_before   = perfcontrol_failsafe_recommended_at_trigger;
9071 	uint64_t rec_cores_after    = pcs.pcs_requested_recommended_clpc;
9072 	uint64_t failsafe_duration  = ctime - perfcontrol_failsafe_activation_time;
9073 	strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
9074 
9075 	print_diagnostic = TRUE;
9076 
9077 	/* Deactivate the failsafe and reinstate the requested recommendation settings */
9078 
9079 	perfcontrol_failsafe_deactivation_time = ctime;
9080 	pcs.pcs_recommended_clpc_failsafe_active = false;
9081 
9082 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
9083 
9084 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9085 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
9086 	    pcs.pcs_requested_recommended_clpc, failsafe_duration, 0, 0, 0);
9087 
9088 out:
9089 	simple_unlock(&sched_available_cores_lock);
9090 	splx(s);
9091 
9092 	if (print_diagnostic) {
9093 		uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
9094 
9095 		absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
9096 		failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
9097 
9098 		absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
9099 		thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
9100 
9101 		printf("recommended core failsafe kicked in for %lld ms "
9102 		    "likely due to %s[%d] thread 0x%llx spending "
9103 		    "%lld ms on cpu at realtime priority - "
9104 		    "new recommendation: 0x%llx -> 0x%llx\n",
9105 		    failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
9106 		    rec_cores_before, rec_cores_after);
9107 	}
9108 }
9109 
9110 #endif /* __arm__ || __arm64__ */
9111 
9112 /*
9113  * This is true before we have jumped into the first thread context
9114  * (kernel_bootstrap_thread) during boot, or while all processors
9115  * have gone offline during system sleep and the scheduler is disabled.
9116  *
9117  * (Note: only ever true on ARM, Intel doesn't actually offline the last CPU)
9118  */
9119 bool
9120 sched_all_cpus_offline(void)
9121 {
9122 	return pcs.pcs_effective.pcs_online_cores == 0;
9123 }
9124 
9125 void
9126 sched_assert_not_last_online_cpu(__assert_only int cpu_id)
9127 {
9128 	assertf(pcs.pcs_effective.pcs_online_cores != BIT(cpu_id),
9129 	    "attempting to shut down the last online CPU!");
9130 }
9131 
9132 /*
9133  * This is the unified single function to change published active core counts based on processor mode.
9134  * Each type of flag affects the other in terms of how the counts change.
9135  *
9136  * Future: Add support for not decrementing counts in 'temporary derecommended online' mode
9137  * Future: Shutdown for system sleep should be 'temporary' according to the user counts
9138  * so that no client sees a transiently low number of CPUs.
9139  */
9140 void
9141 sched_processor_change_mode_locked(processor_t processor, processor_mode_t pcm_mode, bool set)
9142 {
9143 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
9144 	pset_assert_locked(processor->processor_set);
9145 
9146 	switch (pcm_mode) {
9147 	case PCM_RECOMMENDED:
9148 		if (set) {
9149 			assert(!processor->is_recommended);
9150 			assert(!bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
9151 
9152 			processor->is_recommended = true;
9153 			bit_set(pcs.pcs_recommended_cores, processor->cpu_id);
9154 
9155 			if (processor->processor_online) {
9156 				os_atomic_inc(&processor_avail_count_user, relaxed);
9157 #if CONFIG_SCHED_SMT
9158 				if (processor->processor_primary == processor) {
9159 					os_atomic_inc(&primary_processor_avail_count_user, relaxed);
9160 				}
9161 #endif /* CONFIG_SCHED_SMT */
9162 			}
9163 		} else {
9164 			assert(processor->is_recommended);
9165 			assert(bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
9166 
9167 			processor->is_recommended = false;
9168 			bit_clear(pcs.pcs_recommended_cores, processor->cpu_id);
9169 
9170 			if (processor->processor_online) {
9171 				os_atomic_dec(&processor_avail_count_user, relaxed);
9172 #if CONFIG_SCHED_SMT
9173 				if (processor->processor_primary == processor) {
9174 					os_atomic_dec(&primary_processor_avail_count_user, relaxed);
9175 				}
9176 #endif /* CONFIG_SCHED_SMT */
9177 			}
9178 		}
9179 		break;
9180 	case PCM_TEMPORARY:
9181 		if (set) {
9182 			assert(!processor->shutdown_temporary);
9183 			assert(!bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
9184 
9185 			processor->shutdown_temporary = true;
9186 			bit_set(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
9187 
9188 			if (!processor->processor_online) {
9189 				goto counts_up;
9190 			}
9191 		} else {
9192 			assert(processor->shutdown_temporary);
9193 			assert(bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
9194 
9195 			processor->shutdown_temporary = false;
9196 			bit_clear(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
9197 
9198 			if (!processor->processor_online) {
9199 				goto counts_down;
9200 			}
9201 		}
9202 		break;
9203 	case PCM_ONLINE:
9204 		if (set) {
9205 			assert(!processor->processor_online);
9206 			assert(!bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
9207 			processor->processor_online = true;
9208 			bit_set(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
9209 
9210 			if (!processor->shutdown_temporary) {
9211 				goto counts_up;
9212 			}
9213 		} else {
9214 			assert(processor->processor_online);
9215 			assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
9216 			processor->processor_online = false;
9217 			bit_clear(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
9218 
9219 			if (!processor->shutdown_temporary) {
9220 				goto counts_down;
9221 			}
9222 		}
9223 		break;
9224 	default:
9225 		panic("unknown mode %d", pcm_mode);
9226 	}
9227 
9228 	return;
9229 
9230 counts_up:
9231 	ml_cpu_up_update_counts(processor->cpu_id);
9232 
9233 	os_atomic_inc(&processor_avail_count, relaxed);
9234 
9235 	if (processor->is_recommended) {
9236 		os_atomic_inc(&processor_avail_count_user, relaxed);
9237 #if CONFIG_SCHED_SMT
9238 		if (processor->processor_primary == processor) {
9239 			os_atomic_inc(&primary_processor_avail_count_user, relaxed);
9240 		}
9241 #endif /* CONFIG_SCHED_SMT */
9242 	}
9243 	commpage_update_active_cpus();
9244 
9245 	return;
9246 
9247 counts_down:
9248 	ml_cpu_down_update_counts(processor->cpu_id);
9249 
9250 	os_atomic_dec(&processor_avail_count, relaxed);
9251 
9252 	if (processor->is_recommended) {
9253 		os_atomic_dec(&processor_avail_count_user, relaxed);
9254 #if CONFIG_SCHED_SMT
9255 		if (processor->processor_primary == processor) {
9256 			os_atomic_dec(&primary_processor_avail_count_user, relaxed);
9257 		}
9258 #endif /* CONFIG_SCHED_SMT */
9259 	}
9260 	commpage_update_active_cpus();
9261 
9262 	return;
9263 }
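
/*
 * Sketch of the published-count invariant maintained above (illustrative,
 * not compiled): a processor contributes to processor_avail_count iff it is
 * online or marked temporarily shut down, and to the _user variants iff it
 * is, roughly, additionally recommended.
 */
#if 0
static bool
processor_counts_as_available(processor_t processor)
{
	return processor->processor_online || processor->shutdown_temporary;
}

static bool
processor_counts_as_available_user(processor_t processor)
{
	return processor_counts_as_available(processor) && processor->is_recommended;
}
#endif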
9264 
9265 bool
9266 sched_mark_processor_online(processor_t processor, __assert_only processor_reason_t reason)
9267 {
9268 	assert(processor == current_processor());
9269 
9270 	processor_set_t pset = processor->processor_set;
9271 
9272 	spl_t s = splsched();
9273 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9274 	pset_lock(pset);
9275 
9276 	/* Boot CPU coming online for the first time, either at boot or after sleep */
9277 	bool is_first_online_processor = sched_all_cpus_offline();
9278 	if (is_first_online_processor) {
9279 		assert(processor == master_processor);
9280 	}
9281 
9282 	assert((processor != master_processor) || (reason == REASON_SYSTEM) || support_bootcpu_shutdown);
9283 
9284 	sched_processor_change_mode_locked(processor, PCM_ONLINE, true);
9285 
9286 	assert(processor->processor_offline_state == PROCESSOR_OFFLINE_STARTING ||
9287 	    processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_RUNNING ||
9288 	    processor->processor_offline_state == PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
9289 
9290 	processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTED_NOT_WAITED);
9291 
9292 	++pset->online_processor_count;
9293 	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
9294 
9295 	if (processor->is_recommended) {
9296 		SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
9297 	}
9298 	pset_unlock(pset);
9299 
9300 	smr_cpu_up(processor, SMR_CPU_REASON_OFFLINE);
9301 
9302 	simple_unlock(&sched_available_cores_lock);
9303 	splx(s);
9304 
9305 	return is_first_online_processor;
9306 }
9307 
9308 void
9309 sched_mark_processor_offline(processor_t processor, bool is_final_system_sleep)
9310 {
9311 	assert(processor == current_processor());
9312 
9313 	processor_set_t pset = processor->processor_set;
9314 
9315 	spl_t s = splsched();
9316 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9317 
9318 	assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
9319 	assert(processor->processor_offline_state == PROCESSOR_OFFLINE_BEGIN_SHUTDOWN);
9320 
9321 	if (!is_final_system_sleep) {
9322 		/*
9323 		 * We can't shut down the last available core!
9324 		 * Force recommend another CPU if this is the last one.
9325 		 */
9326 
9327 		if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
9328 			sched_update_recommended_cores_locked(REASON_SYSTEM, BIT(processor->cpu_id));
9329 		}
9330 
9331 		/* If we're still the last one, something went wrong. */
9332 		if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
9333 			panic("shutting down the last available core! online: 0x%llx rec: 0x%llx",
9334 			    pcs.pcs_effective.pcs_online_cores,
9335 			    pcs.pcs_recommended_cores);
9336 		}
9337 	}
9338 
9339 	pset_lock(pset);
9340 	assert(processor->state == PROCESSOR_RUNNING);
9341 	assert(processor->processor_inshutdown);
9342 	pset_update_processor_state(pset, processor, PROCESSOR_PENDING_OFFLINE);
9343 	--pset->online_processor_count;
9344 
9345 	sched_processor_change_mode_locked(processor, PCM_ONLINE, false);
9346 
9347 	if (is_final_system_sleep) {
9348 		assert3u(pcs.pcs_effective.pcs_online_cores, ==, 0);
9349 		assert(processor == master_processor);
9350 		assert(sched_all_cpus_offline());
9351 
9352 		processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
9353 	} else {
9354 		processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_PENDING_OFFLINE);
9355 	}
9356 
9357 	simple_unlock(&sched_available_cores_lock);
9358 
9359 	SCHED(processor_queue_shutdown)(processor);
9360 	/* pset lock dropped */
9361 	SCHED(rt_queue_shutdown)(processor);
9362 
9363 	splx(s);
9364 }
9365 
9366 /*
9367  * Apply a new recommended cores mask to the processors it affects
9368  * Runs after considering failsafes and such
9369  *
9370  * Iterate over processors and update their ->is_recommended field.
9371  * If a processor is running, we let it drain out at its next
9372  * quantum expiration or blocking point. If a processor is idle, there
9373  * may be more work for it to do, so IPI it.
9374  *
9375  * interrupts disabled, sched_available_cores_lock is held
9376  *
9377  * If a core is about to go offline, its bit will be set in core_going_offline,
9378  * so we can make sure not to pick it as the last resort cpu.
9379  */
9380 static void
9381 sched_update_recommended_cores_locked(processor_reason_t reason,
9382     cpumap_t core_going_offline)
9383 {
9384 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
9385 
9386 	cpumap_t recommended_cores = pcs.pcs_requested_recommended_clpc;
9387 
9388 	if (pcs.pcs_init_completed) {
9389 		recommended_cores &= pcs.pcs_effective.pcs_powerdown_recommended_cores;
9390 	}
9391 
9392 	if (pcs.pcs_sleep_override_recommended || pcs.pcs_recommended_clpc_failsafe_active) {
9393 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
9394 		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
9395 		    recommended_cores,
9396 		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
9397 
9398 		recommended_cores = pcs.pcs_managed_cores;
9399 	}
9400 
9401 	if (bit_count(recommended_cores & pcs.pcs_effective.pcs_online_cores & ~core_going_offline) == 0) {
9402 		/*
9403 		 * If there are no online cpus recommended,
9404 		 * then the system will make no forward progress.
9405 		 * Pick a CPU of last resort to avoid hanging.
9406 		 */
9407 		int last_resort;
9408 
9409 		if (!support_bootcpu_shutdown) {
9410 			/* We know the master_processor is always available */
9411 			last_resort = master_processor->cpu_id;
9412 		} else {
9413 			/* Pick some still-online processor to be the processor of last resort */
9414 			last_resort = lsb_first(pcs.pcs_effective.pcs_online_cores & ~core_going_offline);
9415 
9416 			if (last_resort == -1) {
9417 				panic("%s> no last resort cpu found: 0x%llx 0x%llx",
9418 				    __func__, pcs.pcs_effective.pcs_online_cores, core_going_offline);
9419 			}
9420 		}
9421 
9422 		bit_set(recommended_cores, last_resort);
9423 	}
9424 
9425 	if (pcs.pcs_recommended_cores == recommended_cores) {
9426 		/* Nothing to do */
9427 		return;
9428 	}
9429 
9430 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) |
9431 	    DBG_FUNC_START,
9432 	    recommended_cores,
9433 	    pcs.pcs_recommended_clpc_failsafe_active, pcs.pcs_sleep_override_recommended, 0);
9434 
9435 	cpumap_t needs_exit_idle_mask = 0x0;
9436 
9437 	/* First set recommended cores */
9438 	foreach_node(node) {
9439 		foreach_pset_id(pset_id, node) {
9440 			processor_set_t pset = pset_array[pset_id];
9441 
9442 			cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
9443 			cpumap_t newly_recommended = changed_recommendations & recommended_cores;
9444 
9445 			if (newly_recommended == 0) {
9446 				/* Nothing to do */
9447 				continue;
9448 			}
9449 
9450 			pset_lock(pset);
9451 
9452 			cpumap_foreach(cpu_id, newly_recommended) {
9453 				processor_t processor = processor_array[cpu_id];
9454 
9455 				sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, true);
9456 
9457 				processor->last_recommend_reason = reason;
9458 
9459 				if (pset->recommended_bitmask == 0) {
9460 					/* Cluster is becoming available for scheduling */
9461 					atomic_bit_set(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
9462 				}
9463 				bit_set(pset->recommended_bitmask, processor->cpu_id);
9464 
9465 				if (processor->state == PROCESSOR_IDLE) {
9466 					if (processor != current_processor()) {
9467 						bit_set(needs_exit_idle_mask, processor->cpu_id);
9468 					}
9469 				}
9470 
9471 				if (processor->processor_online) {
9472 					SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
9473 				}
9474 			}
9475 			pset_update_rt_stealable_state(pset);
9476 
9477 			pset_unlock(pset);
9478 
9479 			cpumap_foreach(cpu_id, newly_recommended) {
9480 				smr_cpu_up(processor_array[cpu_id],
9481 				    SMR_CPU_REASON_IGNORED);
9482 			}
9483 		}
9484 	}
9485 
9486 	/* Now shutdown not recommended cores */
9487 	foreach_node(node) {
9488 		foreach_pset_id(pset_id, node) {
9489 			processor_set_t pset = pset_array[pset_id];
9490 
9491 			cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
9492 			cpumap_t newly_unrecommended = changed_recommendations & ~recommended_cores;
9493 
9494 			if (newly_unrecommended == 0) {
9495 				/* Nothing to do */
9496 				continue;
9497 			}
9498 
9499 			cpumap_foreach(cpu_id, newly_unrecommended) {
9500 				processor_t processor = processor_array[cpu_id];
9501 				sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
9502 
9503 				pset_lock(pset);
9504 
9505 				sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, false);
9506 
9507 				if (reason != REASON_NONE) {
9508 					processor->last_derecommend_reason = reason;
9509 				}
9510 				bit_clear(pset->recommended_bitmask, processor->cpu_id);
9511 				pset_update_rt_stealable_state(pset);
9512 				if (pset->recommended_bitmask == 0) {
9513 					/* Cluster is becoming unavailable for scheduling */
9514 					atomic_bit_clear(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
9515 				}
9516 
9517 				if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
9518 					ipi_type = SCHED_IPI_IMMEDIATE;
9519 				}
9520 				SCHED(processor_queue_shutdown)(processor);
9521 				/* pset unlocked */
9522 
9523 				SCHED(rt_queue_shutdown)(processor);
9524 
9525 				if (ipi_type == SCHED_IPI_NONE) {
9526 					/*
9527 					 * If the core is idle,
9528 					 * we can directly mark the processor
9529 					 * as "Ignored"
9530 					 *
9531 					 * Otherwise, smr will detect this
9532 					 * during smr_cpu_leave() when the
9533 					 * processor actually idles.
9534 					 */
9535 					smr_cpu_down(processor, SMR_CPU_REASON_IGNORED);
9536 				} else if (processor == current_processor()) {
9537 					ast_on(AST_PREEMPT);
9538 				} else {
9539 					sched_ipi_perform(processor, ipi_type);
9540 				}
9541 			}
9542 		}
9543 	}
9544 
9545 	if (pcs.pcs_init_completed) {
9546 		assert3u(pcs.pcs_recommended_cores, ==, recommended_cores);
9547 	}
9548 
9549 #if defined(__x86_64__)
9550 	commpage_update_active_cpus();
9551 #endif
9552 	/* Issue all pending IPIs now that the pset lock has been dropped */
9553 	cpumap_foreach(cpu_id, needs_exit_idle_mask) {
9554 		processor_t processor = processor_array[cpu_id];
9555 		machine_signal_idle(processor);
9556 	}
9557 
9558 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
9559 	    needs_exit_idle_mask, 0, 0, 0);
9560 }
9561 
9562 /*
9563  * Enters with the available cores lock held, returns with it held, but will drop it in the meantime.
9564  * Enters with the cluster_powerdown_lock held, returns with it held, keeps it held.
9565  */
9566 static void
9567 sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t caller_s)
9568 {
9569 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
9570 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
9571 
9572 	assert(ml_get_interrupts_enabled() == false);
9573 	assert(caller_s == true); /* Caller must have had interrupts enabled when they took the lock */
9574 
9575 	/* All transitions should be quiesced before we start changing things */
9576 	assert_no_processors_in_transition_locked();
9577 
9578 	pcs.pcs_in_flight_reason = requested_reason;
9579 
9580 	struct powered_cores_state requested = sched_compute_requested_powered_cores();
9581 	struct powered_cores_state effective = pcs.pcs_effective;
9582 
9583 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_START,
9584 	    requested.pcs_online_cores, requested_reason, 0, effective.pcs_online_cores);
9585 
9586 	/* The bits that are different and in the new value */
9587 	cpumap_t newly_online_cores = (requested.pcs_online_cores ^
9588 	    effective.pcs_online_cores) & requested.pcs_online_cores;
9589 
9590 	/* The bits that are different and are not in the new value */
9591 	cpumap_t newly_offline_cores = (requested.pcs_online_cores ^
9592 	    effective.pcs_online_cores) & ~requested.pcs_online_cores;
9593 
9594 	cpumap_t newly_recommended_cores = (requested.pcs_powerdown_recommended_cores ^
9595 	    effective.pcs_powerdown_recommended_cores) & requested.pcs_powerdown_recommended_cores;
9596 
9597 	cpumap_t newly_derecommended_cores = (requested.pcs_powerdown_recommended_cores ^
9598 	    effective.pcs_powerdown_recommended_cores) & ~requested.pcs_powerdown_recommended_cores;
9599 
9600 	cpumap_t newly_temporary_cores = (requested.pcs_tempdown_cores ^
9601 	    effective.pcs_tempdown_cores) & requested.pcs_tempdown_cores;
9602 
9603 	cpumap_t newly_nontemporary_cores = (requested.pcs_tempdown_cores ^
9604 	    effective.pcs_tempdown_cores) & ~requested.pcs_tempdown_cores;
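
	/*
	 * Worked example (hypothetical masks): if effective.pcs_online_cores is
	 * 0b1010 and requested.pcs_online_cores is 0b1100, the XOR is 0b0110,
	 * so newly_online_cores = 0b0110 & 0b1100 = 0b0100 and
	 * newly_offline_cores = 0b0110 & ~0b1100 = 0b0010. The same
	 * difference-and-mask pattern yields the recommended and temporary
	 * deltas above.
	 */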
9605 
9606 	/*
9607 	 * Newly online and derecommended cores should be derecommended
9608 	 * before powering them up, so they never run around doing stuff
9609 	 * before we reach the end of this function.
9610 	 */
9611 
9612 	cpumap_t newly_online_and_derecommended = newly_online_cores & newly_derecommended_cores;
9613 
9614 	/*
9615 	 * Publish the goal state we're working on achieving.
9616 	 * At the end of this function, pcs_effective will match this.
9617 	 */
9618 	pcs.pcs_requested = requested;
9619 
9620 	pcs.pcs_effective.pcs_powerdown_recommended_cores |= newly_recommended_cores;
9621 	pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_online_and_derecommended;
9622 
9623 	sched_update_recommended_cores_locked(requested_reason, 0);
9624 
9625 	simple_unlock(&sched_available_cores_lock);
9626 	splx(caller_s);
9627 
9628 	assert(ml_get_interrupts_enabled() == true);
9629 
9630 	/* First set powered cores */
9631 	cpumap_t started_cores = 0ull;
9632 	foreach_node(node) {
9633 		foreach_pset_id(pset_id, node) {
9634 			processor_set_t pset = pset_array[pset_id];
9635 
9636 			spl_t s = splsched();
9637 			pset_lock(pset);
9638 			cpumap_t pset_newly_online = newly_online_cores & pset->cpu_bitmask;
9639 
9640 			__assert_only cpumap_t pset_online_cores =
9641 			    pset->cpu_state_map[PROCESSOR_START] |
9642 			    pset->cpu_state_map[PROCESSOR_IDLE] |
9643 			    pset->cpu_state_map[PROCESSOR_DISPATCHING] |
9644 			    pset->cpu_state_map[PROCESSOR_RUNNING];
9645 			assert((pset_online_cores & pset_newly_online) == 0);
9646 
9647 			pset_unlock(pset);
9648 			splx(s);
9649 
9650 			if (pset_newly_online == 0) {
9651 				/* Nothing to do */
9652 				continue;
9653 			}
9654 			cpumap_foreach(cpu_id, pset_newly_online) {
9655 				processor_start_reason(processor_array[cpu_id], requested_reason);
9656 				bit_set(started_cores, cpu_id);
9657 			}
9658 		}
9659 	}
9660 
9661 	/*
9662 	 * Wait for processors to finish starting in parallel.
9663 	 * We never proceed until all newly started processors have finished.
9664 	 *
9665 	 * This has the side effect of closing the ml_cpu_up_processors race,
9666 	 * as all started CPUs must have SIGPdisabled cleared by the time this
9667 	 * is satisfied. (rdar://124631843)
9668 	 */
9669 	cpumap_foreach(cpu_id, started_cores) {
9670 		processor_wait_for_start(processor_array[cpu_id], PROCESSOR_POWERED_CORES_CHANGE);
9671 	}
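
	/*
	 * Note that processor_start_reason() was issued for every newly
	 * online CPU above before any waiting began, so the per-CPU bringups
	 * overlap and the total latency is roughly that of the slowest CPU
	 * rather than the sum over all of them.
	 */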
9672 
9673 	/*
9674 	 * Update published counts of processors to match new temporary status.
9675 	 * Publish all temporary before nontemporary, so that any readers that
9676 	 * see a middle state will see a slightly too high count instead of
9677 	 * ending up seeing a 0 (because that crashes dispatch_apply, ask
9678 	 * me how I know)
9679 	 */
9680 
9681 	spl_t s;
9682 	s = splsched();
9683 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9684 
9685 	foreach_node(node) {
9686 		foreach_pset_id(pset_id, node) {
9687 			processor_set_t pset = pset_array[pset_id];
9688 
9689 			pset_lock(pset);
9690 
9691 			cpumap_t pset_newly_temporary = newly_temporary_cores & pset->cpu_bitmask;
9692 
9693 			cpumap_foreach(cpu_id, pset_newly_temporary) {
9694 				sched_processor_change_mode_locked(processor_array[cpu_id],
9695 				    PCM_TEMPORARY, true);
9696 			}
9697 
9698 			pset_unlock(pset);
9699 		}
9700 	}
9701 
9702 	foreach_node(node) {
9703 		foreach_pset_id(pset_id, node) {
9704 			processor_set_t pset = pset_array[pset_id];
9705 
9706 			pset_lock(pset);
9707 
9708 			cpumap_t pset_newly_nontemporary = newly_nontemporary_cores & pset->cpu_bitmask;
9709 
9710 			cpumap_foreach(cpu_id, pset_newly_nontemporary) {
9711 				sched_processor_change_mode_locked(processor_array[cpu_id],
9712 				    PCM_TEMPORARY, false);
9713 			}
9714 
9715 			pset_unlock(pset);
9716 		}
9717 	}
9718 
9719 	simple_unlock(&sched_available_cores_lock);
9720 	splx(s);
9721 
9722 	/* Now shutdown not powered cores */
9723 	foreach_node(node) {
9724 		foreach_pset_id(pset_id, node) {
9725 			processor_set_t pset = pset_array[pset_id];
9726 
9727 			s = splsched();
9728 			pset_lock(pset);
9729 
9730 			cpumap_t pset_newly_offline = newly_offline_cores & pset->cpu_bitmask;
9731 			__assert_only cpumap_t pset_powered_cores =
9732 			    pset->cpu_state_map[PROCESSOR_START] |
9733 			    pset->cpu_state_map[PROCESSOR_IDLE] |
9734 			    pset->cpu_state_map[PROCESSOR_DISPATCHING] |
9735 			    pset->cpu_state_map[PROCESSOR_RUNNING];
9736 			assert((pset_powered_cores & pset_newly_offline) == pset_newly_offline);
9737 
9738 			pset_unlock(pset);
9739 			splx(s);
9740 
9741 			if (pset_newly_offline == 0) {
9742 				/* Nothing to do */
9743 				continue;
9744 			}
9745 
9746 			cpumap_foreach(cpu_id, pset_newly_offline) {
9747 				processor_exit_reason(processor_array[cpu_id], requested_reason, false);
9748 			}
9749 		}
9750 	}
9751 
9752 	assert(ml_get_interrupts_enabled() == true);
9753 
9754 	s = splsched();
9755 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9756 
9757 	assert(s == caller_s);
9758 
9759 	pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_derecommended_cores;
9760 
9761 	sched_update_recommended_cores_locked(requested_reason, 0);
9762 
9763 	pcs.pcs_previous_reason = requested_reason;
9764 
9765 	/* All transitions should be quiesced now that we are done changing things */
9766 	assert_no_processors_in_transition_locked();
9767 
9768 	assert3u(pcs.pcs_requested.pcs_online_cores, ==, pcs.pcs_effective.pcs_online_cores);
9769 	assert3u(pcs.pcs_requested.pcs_tempdown_cores, ==, pcs.pcs_effective.pcs_tempdown_cores);
9770 	assert3u(pcs.pcs_requested.pcs_powerdown_recommended_cores, ==, pcs.pcs_effective.pcs_powerdown_recommended_cores);
9771 
9772 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_END, 0, 0, 0, 0);
9773 }
9774 
9775 void
9776 thread_set_options(uint32_t thopt)
9777 {
9778 	spl_t x;
9779 	thread_t t = current_thread();
9780 
9781 	x = splsched();
9782 	thread_lock(t);
9783 
9784 	t->options |= thopt;
9785 
9786 	thread_unlock(t);
9787 	splx(x);
9788 }
9789 
9790 void
9791 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
9792 {
9793 	thread->pending_block_hint = block_hint;
9794 }
9795 
9796 uint32_t
9797 qos_max_parallelism(int qos, uint64_t options)
9798 {
9799 	return SCHED(qos_max_parallelism)(qos, options);
9800 }
9801 
9802 uint32_t
9803 sched_qos_max_parallelism(__unused int qos, uint64_t options)
9804 {
9805 	host_basic_info_data_t hinfo;
9806 	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
9807 
9808 
9809 	/*
9810 	 * QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE should only be used on AMP
9811 	 * platforms, which implement their own qos_max_parallelism() interfaces.
9812 	 */
9813 	assert((options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) == 0);
9814 
9815 	/* Query the machine layer for core information */
9816 	__assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
9817 	    (host_info_t)&hinfo, &count);
9818 	assert(kret == KERN_SUCCESS);
9819 
9820 	if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
9821 		return hinfo.logical_cpu;
9822 	} else {
9823 		return hinfo.physical_cpu;
9824 	}
9825 }
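
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a subsystem sizing a worker pool might ask for the logical width at a
 * given QoS; omitting QOS_PARALLELISM_COUNT_LOGICAL from the options
 * yields the physical core count instead.
 *
 *	uint32_t width = qos_max_parallelism(THREAD_QOS_USER_INITIATED,
 *	    QOS_PARALLELISM_COUNT_LOGICAL);
 */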
9826 
9827 int sched_allow_NO_SMT_threads = 1;
9828 #if CONFIG_SCHED_SMT
9829 bool
9830 thread_no_smt(thread_t thread)
9831 {
9832 	return sched_allow_NO_SMT_threads &&
9833 	       (thread->bound_processor == PROCESSOR_NULL) &&
9834 	       ((thread->sched_flags & TH_SFLAG_NO_SMT) || (get_threadtask(thread)->t_flags & TF_NO_SMT));
9835 }
9836 
9837 bool
9838 processor_active_thread_no_smt(processor_t processor)
9839 {
9840 	return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
9841 }
9842 #endif /* CONFIG_SCHED_SMT */
9843 
9844 #if __arm64__
9845 
9846 /*
9847  * Set up a new timer, or replace the old timer with the new one.
9848  *
9849  * Returns true if it canceled an old timer, false if it did not.
9850  */
9851 boolean_t
9852 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
9853 {
9854 	/*
9855 	 * Exchange deadline for new deadline, if old deadline was nonzero,
9856 	 * then I cancelled the callback, otherwise I didn't
9857 	 */
9858 
9859 	return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
9860 	           relaxed) != 0;
9861 }
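
/*
 * Note: passing 0 doubles as a cancellation, since the exchange leaves no
 * pending deadline, and the boolean result reports whether an earlier
 * deadline was displaced, e.g.
 *
 *	boolean_t had_timer = sched_perfcontrol_update_callback_deadline(0);
 */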
9862 
9863 /*
9864  * Set global SFI window (in usec)
9865  */
9866 kern_return_t
9867 sched_perfcontrol_sfi_set_window(uint64_t window_usecs)
9868 {
9869 	kern_return_t ret = KERN_NOT_SUPPORTED;
9870 #if CONFIG_THREAD_GROUPS
9871 	if (window_usecs == 0ULL) {
9872 		ret = sfi_window_cancel();
9873 	} else {
9874 		ret = sfi_set_window(window_usecs);
9875 	}
9876 #endif // CONFIG_THREAD_GROUPS
9877 	return ret;
9878 }
9879 
9880 /*
9881  * Set background and maintenance SFI class offtimes
9882  */
9883 kern_return_t
9884 sched_perfcontrol_sfi_set_bg_offtime(uint64_t offtime_usecs)
9885 {
9886 	kern_return_t ret = KERN_NOT_SUPPORTED;
9887 #if CONFIG_THREAD_GROUPS
9888 	if (offtime_usecs == 0ULL) {
9889 		ret = sfi_class_offtime_cancel(SFI_CLASS_MAINTENANCE);
9890 		ret |= sfi_class_offtime_cancel(SFI_CLASS_DARWIN_BG);
9891 	} else {
9892 		ret = sfi_set_class_offtime(SFI_CLASS_MAINTENANCE, offtime_usecs);
9893 		ret |= sfi_set_class_offtime(SFI_CLASS_DARWIN_BG, offtime_usecs);
9894 	}
9895 #endif // CONFIG_THREAD_GROUPS
9896 	return ret;
9897 }
9898 
9899 /*
9900  * Set utility SFI class offtime
9901  */
9902 kern_return_t
9903 sched_perfcontrol_sfi_set_utility_offtime(uint64_t offtime_usecs)
9904 {
9905 	kern_return_t ret = KERN_NOT_SUPPORTED;
9906 #if CONFIG_THREAD_GROUPS
9907 	if (offtime_usecs == 0ULL) {
9908 		ret = sfi_class_offtime_cancel(SFI_CLASS_UTILITY);
9909 	} else {
9910 		ret = sfi_set_class_offtime(SFI_CLASS_UTILITY, offtime_usecs);
9911 	}
9912 #endif // CONFIG_THREAD_GROUPS
9913 	return ret;
9914 }
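
/*
 * Illustrative usage of the three SFI setters above (made-up values):
 * establish a 10ms window with a 2ms utility off-time, then tear both
 * down by passing 0, which maps to the cancel paths.
 *
 *	sched_perfcontrol_sfi_set_window(10000);          // 10ms in usecs
 *	sched_perfcontrol_sfi_set_utility_offtime(2000);  // 2ms in usecs
 *	...
 *	sched_perfcontrol_sfi_set_window(0);
 *	sched_perfcontrol_sfi_set_utility_offtime(0);
 */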
9915 
9916 #endif /* __arm64__ */
9917 
9918 #if CONFIG_SCHED_EDGE
9919 
9920 #define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
9921 
9922 /*
9923  * sched_edge_pset_running_higher_bucket()
9924  *
9925  * Routine to calculate cumulative running counts for each scheduling
9926  * bucket. This effectively lets the load calculation calculate if a
9927  * cluster is running any threads at a QoS lower than the thread being
9928  * migrated etc.
9929  */
9930 static void
9931 sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
9932 {
9933 	bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
9934 	bzero(running_higher, sizeof(uint32_t) * TH_BUCKET_SCHED_MAX);
9935 
9936 	/* Count the running threads per bucket */
9937 	for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
9938 		sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
9939 		/* Don't count idle threads */
9940 		if (cpu_bucket < TH_BUCKET_SCHED_MAX) {
9941 			running_higher[cpu_bucket]++;
9942 		}
9943 	}
9944 
9945 	/* Calculate the cumulative running counts as a prefix sum */
9946 	for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SCHED_MAX - 1; bucket++) {
9947 		running_higher[bucket + 1] += running_higher[bucket];
9948 	}
9949 }
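
/*
 * Illustrative example of the prefix sum above (made-up counts): if one
 * CPU is running a TH_BUCKET_FIXPRI thread and two are running
 * TH_BUCKET_SHARE_FG threads, the loop leaves
 * running_higher[TH_BUCKET_FIXPRI] == 1 and
 * running_higher[TH_BUCKET_SHARE_FG] == 3, i.e. each entry counts the
 * CPUs running at that bucket or any higher-priority (lower-indexed)
 * bucket.
 */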
9950 
9951 /*
9952  * sched_update_pset_load_average()
9953  *
9954  * Updates the load average for each sched bucket for a cluster.
9955  * This routine must be called with the pset lock held.
9956  */
9957 void
9958 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
9959 {
9960 	int avail_cpu_count = pset_available_cpu_count(pset);
9961 	if (avail_cpu_count == 0) {
9962 		/* Looks like the pset is not runnable any more; nothing to do here */
9963 		return;
9964 	}
9965 
9966 	/*
9967 	 * Edge Scheduler Optimization
9968 	 *
9969 	 * See if more callers of this routine can pass in timestamps to avoid the
9970 	 * mach_absolute_time() call here.
9971 	 */
9972 
9973 	if (!curtime) {
9974 		curtime = mach_absolute_time();
9975 	}
9976 	uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
9977 	int64_t delta_ticks = curtime - last_update;
9978 	if (delta_ticks < 0) {
9979 		return;
9980 	}
9981 
9982 	uint64_t delta_nsecs = 0;
9983 	absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
9984 
9985 	if (__improbable(delta_nsecs > UINT32_MAX)) {
9986 		delta_nsecs = UINT32_MAX;
9987 	}
9988 
9989 	/* Update the shared resource load on the pset */
9990 	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
9991 		uint64_t shared_rsrc_runnable_load = sched_edge_shared_rsrc_runnable_load(&pset->pset_clutch_root, shared_rsrc_type);
9992 		uint64_t shared_rsrc_running_load = bit_count(pset->cpu_running_cluster_shared_rsrc_thread[shared_rsrc_type]);
9993 		uint64_t new_shared_load = shared_rsrc_runnable_load + shared_rsrc_running_load;
9994 		uint64_t old_shared_load = os_atomic_xchg(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], new_shared_load, relaxed);
9995 		if (old_shared_load != new_shared_load) {
9996 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_SHARED_LOAD) | DBG_FUNC_NONE, pset->pset_cluster_id, shared_rsrc_type, new_shared_load, shared_rsrc_running_load);
9997 		}
9998 	}
9999 
10000 	uint32_t running_higher[TH_BUCKET_SCHED_MAX];
10001 	sched_edge_pset_running_higher_bucket(pset, running_higher);
10002 
10003 	for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
10004 		uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
10005 		uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
10006 		uint32_t current_runq_depth = sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) +  rt_runq_count(pset) + running_higher[sched_bucket];
10007 		os_atomic_store(&pset->pset_runnable_depth[sched_bucket], current_runq_depth, relaxed);
10008 
10009 		uint32_t current_load = current_runq_depth / avail_cpu_count;
10010 		/*
10011 		 * For the new load average multiply current_load by delta_nsecs (which results in a 32.0 value).
10012 		 * Since we want to maintain the load average as a 24.8 fixed arithmetic value for precision, the
10013 		 * new load average needs to be shifted before it can be added to the old load average.
10014 		 */
10015 		uint64_t new_load_average_factor = (current_load * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
10016 
10017 		/*
10018 		 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
10019 		 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
10020 		 * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
10021 		 */
10022 		int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
10023 		boolean_t load_uptick = (old_load_shifted == 0) && (current_load != 0);
10024 		boolean_t load_downtick = (old_load_shifted != 0) && (current_load == 0);
10025 		uint64_t load_average;
10026 		if (load_uptick || load_downtick) {
10027 			load_average = (current_load << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
10028 		} else {
10029 			/* Indicates a loaded system; use EWMA for load average calculation */
10030 			load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
10031 		}
10032 		os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
10033 		if (load_average != old_load_average) {
10034 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
10035 		}
10036 	}
10037 	os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
10038 }
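
/*
 * A reference model of the 24.8 fixed-point EWMA above (an illustrative
 * sketch, not a kernel interface):
 *
 *	static uint64_t
 *	ewma_24_8(uint64_t old_avg, uint64_t load, uint64_t dt_ns)
 *	{
 *		const uint64_t tc = SCHED_PSET_LOAD_EWMA_TC_NSECS;
 *		uint64_t sample = (load * dt_ns) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
 *		return (old_avg * tc + sample) / (dt_ns + tc);
 *	}
 *
 * Larger dt_ns weights the new sample more heavily; when dt_ns equals the
 * time constant, the old average and the new sample contribute equally.
 */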
10039 
10040 void
10041 sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
10042 {
10043 	pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
10044 	uint64_t avg_thread_execution_time = 0;
10045 
10046 	os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
10047 	    old_execution_time_packed.pset_execution_time_packed,
10048 	    new_execution_time_packed.pset_execution_time_packed, relaxed, {
10049 		uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
10050 		int64_t delta_ticks = curtime - last_update;
10051 		if (delta_ticks <= 0) {
10052 		        /*
10053 		         * It's possible that another CPU came in and updated the pset_execution_time
10054 		         * before this CPU could do it. Since the average execution time is meant to
10055 		         * be an approximate measure per cluster, ignore the older update.
10056 		         */
10057 		        os_atomic_rmw_loop_give_up(return );
10058 		}
10059 		uint64_t delta_nsecs = 0;
10060 		absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
10061 
10062 		uint64_t nanotime = 0;
10063 		absolutetime_to_nanoseconds(execution_time, &nanotime);
10064 		uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
10065 
10066 		/*
10067 		 * Since the average execution time is stored in microseconds, avoid rounding errors in
10068 		 * the EWMA calculation by only using a non-zero previous value.
10069 		 */
10070 		uint64_t old_avg_thread_execution_time = MAX(old_execution_time_packed.pset_avg_thread_execution_time, 1ULL);
10071 
10072 		uint64_t old_execution_time = (old_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
10073 		uint64_t new_execution_time = (execution_time_us * delta_nsecs);
10074 
10075 		avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
10076 		new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
10077 		new_execution_time_packed.pset_execution_time_last_update = curtime;
10078 	});
10079 	if (new_execution_time_packed.pset_avg_thread_execution_time != old_execution_time_packed.pset_avg_thread_execution_time) {
10080 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
10081 	}
10082 }
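
/*
 * Note on the rmw loop above: the average and its last-update timestamp
 * travel together in the packed 64-bit word, so both are published
 * atomically, and a racing CPU holding an older curtime gives up rather
 * than rewinding the timestamp.
 */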
10083 
10084 uint64_t
10085 sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
10086 {
10087 	/* Prevent migrations to derecommended clusters */
10088 	if (!pset_is_recommended(pset)) {
10089 		return UINT64_MAX;
10090 	}
10091 	return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
10092 }
10093 
10094 #else /* CONFIG_SCHED_EDGE */
10095 
10096 void
10097 sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
10098 {
10099 	int non_rt_load = pset->pset_runq.count;
10100 	int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
10101 	int new_load_average = ((int)pset->load_average + load) >> 1;
10102 
10103 	pset->load_average = new_load_average;
10104 #if (DEVELOPMENT || DEBUG)
10105 #if __AMP__
10106 	if (pset->pset_cluster_type == PSET_AMP_P) {
10107 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
10108 	}
10109 #endif
10110 #endif
10111 }
10112 
10113 void
10114 sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
10115 {
10116 }
10117 
10118 #endif /* CONFIG_SCHED_EDGE */
10119 
10120 /* pset is locked */
10121 static bool
10122 processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
10123 {
10124 	int cpuid = processor->cpu_id;
10125 #if defined(__x86_64__)
10126 	if (sched_avoid_cpu0 && (cpuid == 0)) {
10127 		return false;
10128 	}
10129 #endif
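
	/*
	 * A fast-track candidate is available, has no urgent AST already
	 * pending, and is not already running a realtime thread, so an
	 * incoming realtime thread could take it without displacing anything.
	 */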
10130 
10131 	cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
10132 
10133 	return bit_test(fasttrack_map, cpuid);
10134 }
10135 
10136 #if CONFIG_SCHED_SMT
10137 /* pset is locked */
10138 static processor_t
10139 choose_processor_for_realtime_thread_smt(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills)
10140 {
10141 #if defined(__x86_64__)
10142 	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
10143 #else
10144 	const bool avoid_cpu0 = false;
10145 #endif
10146 	cpumap_t cpu_map;
10147 
10148 try_again:
10149 	cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
10150 	if (skip_processor) {
10151 		bit_clear(cpu_map, skip_processor->cpu_id);
10152 	}
10153 	if (skip_spills) {
10154 		cpu_map &= ~pset->rt_pending_spill_cpu_mask;
10155 	}
10156 
10157 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
10158 		bit_clear(cpu_map, 0);
10159 	}
10160 
10161 	cpumap_t primary_map = cpu_map & pset->primary_map;
10162 	if (avoid_cpu0) {
10163 		primary_map = bit_ror64(primary_map, 1);
10164 	}
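
	/*
	 * The rotation trick: rotating the map right by one moves bit 0
	 * (cpu0) up to bit 63, so lsb_first() considers cpu0 last; the
	 * winning rotated index is mapped back to a real cpu id below by
	 * adding 1 modulo 64.
	 */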
10165 
10166 	int rotid = lsb_first(primary_map);
10167 	if (rotid >= 0) {
10168 		int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
10169 
10170 		processor_t processor = processor_array[cpuid];
10171 
10172 		return processor;
10173 	}
10174 
10175 	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
10176 		goto out;
10177 	}
10178 
10179 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
10180 		/* Also avoid cpu1 */
10181 		bit_clear(cpu_map, 1);
10182 	}
10183 
10184 	/* Consider secondary processors whose primary is actually running a realtime thread */
10185 	cpumap_t secondary_map = cpu_map & ~pset->primary_map & (pset->realtime_map << 1);
10186 	if (avoid_cpu0) {
10187 		/* Also avoid cpu1 */
10188 		secondary_map = bit_ror64(secondary_map, 2);
10189 	}
10190 	rotid = lsb_first(secondary_map);
10191 	if (rotid >= 0) {
10192 		int cpuid = avoid_cpu0 ?  ((rotid + 2) & 63) : rotid;
10193 
10194 		processor_t processor = processor_array[cpuid];
10195 
10196 		return processor;
10197 	}
10198 
10199 	/* Consider secondary processors */
10200 	secondary_map = cpu_map & ~pset->primary_map;
10201 	if (avoid_cpu0) {
10202 		/* Also avoid cpu1 */
10203 		secondary_map = bit_ror64(secondary_map, 2);
10204 	}
10205 	rotid = lsb_first(secondary_map);
10206 	if (rotid >= 0) {
10207 		int cpuid = avoid_cpu0 ?  ((rotid + 2) & 63) : rotid;
10208 
10209 		processor_t processor = processor_array[cpuid];
10210 
10211 		return processor;
10212 	}
10213 
10214 	/*
10215 	 * I was hoping the compiler would optimize
10216 	 * this away when avoid_cpu0 is const bool false
10217 	 * but it still complains about the assignment
10218 	 * in that case.
10219 	 */
10220 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
10221 #if defined(__x86_64__)
10222 		avoid_cpu0 = false;
10223 #else
10224 		assert(0);
10225 #endif
10226 		goto try_again;
10227 	}
10228 
10229 out:
10230 	if (skip_processor) {
10231 		return PROCESSOR_NULL;
10232 	}
10233 
10234 	/*
10235 	 * If we didn't find an obvious processor to choose, but there are still more CPUs
10236 	 * not already running realtime threads than realtime threads in the realtime run queue,
10237 	 * this thread belongs in this pset, so choose some other processor in this pset
10238 	 * to ensure the thread is enqueued here.
10239 	 */
10240 	cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
10241 	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
10242 		cpu_map = non_realtime_map;
10243 		assert(cpu_map != 0);
10244 		int cpuid = bit_first(cpu_map);
10245 		assert(cpuid >= 0);
10246 		return processor_array[cpuid];
10247 	}
10248 
10249 	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
10250 		goto skip_secondaries;
10251 	}
10252 
10253 	non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
10254 	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
10255 		cpu_map = non_realtime_map;
10256 		assert(cpu_map != 0);
10257 		int cpuid = bit_first(cpu_map);
10258 		assert(cpuid >= 0);
10259 		return processor_array[cpuid];
10260 	}
10261 
10262 skip_secondaries:
10263 	return PROCESSOR_NULL;
10264 }
10265 #else /* CONFIG_SCHED_SMT*/
10266 /* pset is locked */
10267 static processor_t
10268 choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool skip_spills)
10269 {
10270 	cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
10271 	if (skip_processor) {
10272 		bit_clear(cpu_map, skip_processor->cpu_id);
10273 	}
10274 	if (skip_spills) {
10275 		cpu_map &= ~pset->rt_pending_spill_cpu_mask;
10276 	}
10277 
10278 	int rotid = lsb_first(cpu_map);
10279 	if (rotid >= 0) {
10280 		return processor_array[rotid];
10281 	}
10282 
10283 	/*
10284 	 * If we didn't find an obvious processor to choose, but there are still more CPUs
10285 	 * not already running realtime threads than realtime threads in the realtime run queue,
10286 	 * this thread belongs in this pset, so choose some other processor in this pset
10287 	 * to ensure the thread is enqueued here.
10288 	 */
10289 	cpumap_t non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
10290 	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
10291 		cpu_map = non_realtime_map;
10292 		assert(cpu_map != 0);
10293 		int cpuid = bit_first(cpu_map);
10294 		assert(cpuid >= 0);
10295 		return processor_array[cpuid];
10296 	}
10297 
10298 	return PROCESSOR_NULL;
10299 }
10300 #endif /* CONFIG_SCHED_SMT */
10301 
10302 /*
10303  * Choose the processor with (1) the lowest priority less than max_pri and (2) the furthest deadline for that priority.
10304  * If all available processors are at max_pri, choose the furthest deadline that is greater than minimum_deadline.
10305  *
10306  * pset is locked.
10307  */
10308 static processor_t
10309 choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus)
10310 {
10311 	uint64_t  furthest_deadline = deadline_add(minimum_deadline, rt_deadline_epsilon);
10312 	processor_t fd_processor = PROCESSOR_NULL;
10313 	int lowest_priority = max_pri;
10314 
10315 	cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask;
10316 	if (skip_processor) {
10317 		bit_clear(cpu_map, skip_processor->cpu_id);
10318 	}
10319 	if (skip_spills) {
10320 		cpu_map &= ~pset->rt_pending_spill_cpu_mask;
10321 	}
10322 
10323 	for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
10324 		processor_t processor = processor_array[cpuid];
10325 
10326 		if (processor->current_pri > lowest_priority) {
10327 			continue;
10328 		}
10329 
10330 		if (processor->current_pri < lowest_priority) {
10331 			lowest_priority = processor->current_pri;
10332 			furthest_deadline = processor->deadline;
10333 			fd_processor = processor;
10334 			continue;
10335 		}
10336 
10337 		if (processor->deadline > furthest_deadline) {
10338 			furthest_deadline = processor->deadline;
10339 			fd_processor = processor;
10340 		}
10341 	}
10342 
10343 	if (fd_processor) {
10344 		return fd_processor;
10345 	}
10346 
10347 	/*
10348 	 * There is a race condition possible when there are multiple processor sets.
10349 	 * choose_processor() takes pset lock A, sees the pending_AST_URGENT_cpu_mask set for a processor in that set and finds no suitable candidate CPU,
10350 	 * so it drops pset lock A and tries to take pset lock B.  Meanwhile the pending_AST_URGENT_cpu_mask CPU is looking for a thread to run and holds
10351 	 * pset lock B. It doesn't find any threads (because the candidate thread isn't yet on any run queue), so drops lock B, takes lock A again to clear
10352 	 * the pending_AST_URGENT_cpu_mask bit, and keeps running the current (far deadline) thread. choose_processor() now has lock B and can only find
10353 	 * the lowest count processor in set B so enqueues it on set B's run queue but doesn't IPI anyone. (The lowest count includes all threads,
10354 	 * near and far deadlines, so will prefer a low count of earlier deadlines to a high count of far deadlines, which is suboptimal for EDF scheduling.
10355 	 * To make a better choice we would need to know how many threads with earlier deadlines than the candidate thread exist on each pset's run queue.
10356 	 * But even if we chose the better run queue, we still wouldn't send an IPI in this case.)
10357 	 *
10358 	 * The mitigation is to also look for suitable CPUs that have their pending_AST_URGENT_cpu_mask bit set where there are no earlier deadline threads
10359 	 * on the run queue of that pset.
10360 	 */
10361 	if (include_ast_urgent_pending_cpus && (rt_runq_earliest_deadline(pset) > furthest_deadline)) {
10362 		cpu_map = pset_available_cpumap(pset) & pset->pending_AST_URGENT_cpu_mask;
10363 		assert(skip_processor == PROCESSOR_NULL);
10364 		assert(skip_spills == false);
10365 
10366 		for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
10367 			processor_t processor = processor_array[cpuid];
10368 
10369 			if (processor->current_pri > lowest_priority) {
10370 				continue;
10371 			}
10372 
10373 			if (processor->current_pri < lowest_priority) {
10374 				lowest_priority = processor->current_pri;
10375 				furthest_deadline = processor->deadline;
10376 				fd_processor = processor;
10377 				continue;
10378 			}
10379 
10380 			if (processor->deadline > furthest_deadline) {
10381 				furthest_deadline = processor->deadline;
10382 				fd_processor = processor;
10383 			}
10384 		}
10385 	}
10386 
10387 	return fd_processor;
10388 }
10389 
10390 /* pset is locked */
10391 static processor_t
10392 choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries)
10393 {
10394 	(void) consider_secondaries;
10395 	bool skip_spills = true;
10396 	bool include_ast_urgent_pending_cpus = false;
10397 
10398 #if CONFIG_SCHED_SMT
10399 	processor_t next_processor = choose_processor_for_realtime_thread_smt(pset, skip_processor, consider_secondaries, skip_spills);
10400 #else /* CONFIG_SCHED_SMT */
10401 	processor_t next_processor = choose_processor_for_realtime_thread(pset, skip_processor, skip_spills);
10402 #endif /* CONFIG_SCHED_SMT */
10403 	if (next_processor != PROCESSOR_NULL) {
10404 		return next_processor;
10405 	}
10406 
10407 	next_processor = choose_furthest_deadline_processor_for_realtime_thread(pset, max_pri, minimum_deadline, skip_processor, skip_spills, include_ast_urgent_pending_cpus);
10408 	return next_processor;
10409 }
10410 
10411 #if CONFIG_SCHED_SMT
10412 /* pset is locked */
10413 static bool
10414 all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups)
10415 {
10416 	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
10417 	int nbackup_cpus = 0;
10418 
10419 	if (include_backups && rt_runq_is_low_latency(pset)) {
10420 		nbackup_cpus = sched_rt_n_backup_processors;
10421 	}
10422 
10423 	cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
10424 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
10425 		bit_clear(cpu_map, 0);
10426 	}
10427 	return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
10428 }
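
/*
 * Illustrative example (made-up numbers): with 4 available primaries of
 * which 3 already run realtime threads, cpu_map has a single bit set;
 * with 2 runnable realtime threads queued and 1 backup CPU requested,
 * (2 + 1) > 1, so the caller may bring secondaries into play.
 */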
10429 
10430 /* pset is locked */
10431 static bool
10432 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups)
10433 {
10434 	int nbackup_cpus = 0;
10435 
10436 	if (include_backups && rt_runq_is_low_latency(pset)) {
10437 		nbackup_cpus = sched_rt_n_backup_processors;
10438 	}
10439 
10440 	cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
10441 	return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
10442 }
10443 #endif /* CONFIG_SCHED_SMT */
10444 
10445 static bool
10446 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup)
10447 {
10448 	if (!processor->is_recommended) {
10449 		return false;
10450 	}
10451 	bool ok_to_run_realtime_thread = true;
10452 #if CONFIG_SCHED_SMT
10453 	bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
10454 	if (spill_pending) {
10455 		return true;
10456 	}
10457 	if (processor->cpu_id == 0) {
10458 		if (sched_avoid_cpu0 == 1) {
10459 			ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1, as_backup);
10460 		} else if (sched_avoid_cpu0 == 2) {
10461 			ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, ~0x3, as_backup);
10462 		}
10463 	} else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
10464 		ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2, as_backup);
10465 	} else if (processor->processor_primary != processor) {
10466 		ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset, as_backup));
10467 	}
10468 #else /* CONFIG_SCHED_SMT */
10469 	(void)pset;
10470 	(void)processor;
10471 	(void)as_backup;
10472 #endif /* CONFIG_SCHED_SMT */
10473 	return ok_to_run_realtime_thread;
10474 }
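
/*
 * Roughly, the sched_avoid_cpu0 policy encoded above: a value of 1 keeps
 * realtime threads off cpu0 until the remaining primaries are saturated
 * with them; a value of 2 additionally keeps them off cpu1 (cpu0's SMT
 * sibling) under the corresponding saturation test.
 */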
10475 
10476 void
10477 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
10478 {
10479 	if (drop_lock) {
10480 		pset_unlock(pset);
10481 	}
10482 }
10483 
10484 #if defined(__x86_64__)
10485 void
10486 thread_set_no_smt(bool set)
10487 {
10488 	(void) set;
10489 #if CONFIG_SCHED_SMT
10490 	if (!system_is_SMT) {
10491 		/* Not a machine that supports SMT */
10492 		return;
10493 	}
10494 
10495 	thread_t thread = current_thread();
10496 
10497 	spl_t s = splsched();
10498 	thread_lock(thread);
10499 	if (set) {
10500 		thread->sched_flags |= TH_SFLAG_NO_SMT;
10501 	}
10502 	thread_unlock(thread);
10503 	splx(s);
10504 #endif /* CONFIG_SCHED_SMT */
10505 }
10506 #endif /* __x86_64__ */
10507 
10508 
10509 #if CONFIG_SCHED_SMT
10510 bool
10511 thread_get_no_smt(void)
10512 {
10513 	return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
10514 }
10515 
10516 extern void task_set_no_smt(task_t);
10517 void
10518 task_set_no_smt(task_t task)
10519 {
10520 	if (!system_is_SMT) {
10521 		/* Not a machine that supports SMT */
10522 		return;
10523 	}
10524 
10525 	if (task == TASK_NULL) {
10526 		task = current_task();
10527 	}
10528 
10529 	task_lock(task);
10530 	task->t_flags |= TF_NO_SMT;
10531 	task_unlock(task);
10532 }
10533 
10534 #if DEBUG || DEVELOPMENT
10535 extern void sysctl_task_set_no_smt(char no_smt);
10536 void
10537 sysctl_task_set_no_smt(char no_smt)
10538 {
10539 	if (!system_is_SMT) {
10540 		/* Not a machine that supports SMT */
10541 		return;
10542 	}
10543 
10544 	task_t task = current_task();
10545 
10546 	task_lock(task);
10547 	if (no_smt == '1') {
10548 		task->t_flags |= TF_NO_SMT;
10549 	}
10550 	task_unlock(task);
10551 }
10552 
10553 extern char sysctl_task_get_no_smt(void);
10554 char
10555 sysctl_task_get_no_smt(void)
10556 {
10557 	task_t task = current_task();
10558 
10559 	if (task->t_flags & TF_NO_SMT) {
10560 		return '1';
10561 	}
10562 	return '0';
10563 }
10564 #endif /* DEVELOPMENT || DEBUG */
10565 #else /* CONFIG_SCHED_SMT */
10566 
10567 extern void task_set_no_smt(task_t);
10568 void
10569 task_set_no_smt(__unused task_t task)
10570 {
10571 	return;
10572 }
10573 
10574 #if DEBUG || DEVELOPMENT
10575 extern void sysctl_task_set_no_smt(char no_smt);
10576 void
10577 sysctl_task_set_no_smt(__unused char no_smt)
10578 {
10579 	return;
10580 }
10581 
10582 extern char sysctl_task_get_no_smt(void);
10583 char
10584 sysctl_task_get_no_smt(void)
10585 {
10586 	return '1';
10587 }
10588 #endif /* DEBUG || DEVELOPMENT */
10589 #endif /* CONFIG_SCHED_SMT */
10590 
10591 __private_extern__ void
10592 thread_soft_bind_cluster_type(thread_t thread, char cluster_type)
10593 {
10594 #if __AMP__
10595 	spl_t s = splsched();
10596 	thread_lock(thread);
10597 	thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
10598 	pset_node_t bind_node = PSET_NODE_NULL;
10599 	switch (cluster_type) {
10600 	case 'e':
10601 	case 'E':
10602 		if (ecore_node->psets != PROCESSOR_SET_NULL) {
10603 			bind_node = ecore_node;
10604 		}
10605 		break;
10606 	case 'p':
10607 	case 'P':
10608 		if (pcore_node->psets != PROCESSOR_SET_NULL) {
10609 			bind_node = pcore_node;
10610 		}
10611 		break;
10612 	default:
10613 		break;
10614 	}
10615 	if (bind_node != PSET_NODE_NULL) {
10616 		thread->th_bound_cluster_id = bind_node->psets->pset_id;
10617 	}
10618 	thread_unlock(thread);
10619 	splx(s);
10620 
10621 	if (thread == current_thread()) {
10622 		/* Trigger a context-switch to get on the newly bound cluster */
10623 		thread_block(THREAD_CONTINUE_NULL);
10624 	}
10625 #else /* __AMP__ */
10626 	(void)thread;
10627 	(void)cluster_type;
10628 #endif /* __AMP__ */
10629 }
10630 
10631 extern uint32_t thread_bound_cluster_id(thread_t thread);
10632 uint32_t
10633 thread_bound_cluster_id(thread_t thread)
10634 {
10635 	return thread->th_bound_cluster_id;
10636 }
10637 
10638 __private_extern__ kern_return_t
10639 thread_soft_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options)
10640 {
10641 #if __AMP__
10642 	if (cluster_id == THREAD_BOUND_CLUSTER_NONE) {
10643 		/* Treat binding to THREAD_BOUND_CLUSTER_NONE as a request to unbind. */
10644 		options |= THREAD_UNBIND;
10645 	}
10646 
10647 	if (options & THREAD_UNBIND) {
10648 		cluster_id = THREAD_BOUND_CLUSTER_NONE;
10649 	} else {
10650 		/* Validate the specified cluster id */
10651 		int max_clusters = ml_get_cluster_count();
10652 		if (cluster_id >= max_clusters) {
10653 			/* Invalid cluster id */
10654 			return KERN_INVALID_VALUE;
10655 		}
10656 		processor_set_t pset = pset_array[cluster_id];
10657 		if (pset == NULL) {
10658 			/* Cluster has not finished initializing at boot */
10659 			return KERN_FAILURE;
10660 		}
10661 		if (options & THREAD_BIND_ELIGIBLE_ONLY) {
10662 			if (SCHED(thread_eligible_for_pset(thread, pset)) == false) {
10663 				/* Thread is not recommended for the cluster type */
10664 				return KERN_INVALID_POLICY;
10665 			}
10666 		}
10667 	}
10668 
10669 	spl_t s = splsched();
10670 	thread_lock(thread);
10671 
10672 	thread->th_bound_cluster_id = cluster_id;
10673 
10674 	thread_unlock(thread);
10675 	splx(s);
10676 
10677 	if (thread == current_thread()) {
10678 		/* Trigger a context-switch to get on the newly bound cluster */
10679 		thread_block(THREAD_CONTINUE_NULL);
10680 	}
10681 #else /* __AMP__ */
10682 	(void)thread;
10683 	(void)cluster_id;
10684 	(void)options;
10685 #endif /* __AMP__ */
10686 	return KERN_SUCCESS;
10687 }
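
/*
 * Illustrative usage sketch: bind the current thread to cluster 1 only if
 * the scheduler deems it eligible, then unbind later by passing
 * THREAD_BOUND_CLUSTER_NONE, which this routine treats as THREAD_UNBIND.
 *
 *	kern_return_t kr = thread_soft_bind_cluster_id(current_thread(), 1,
 *	    THREAD_BIND_ELIGIBLE_ONLY);
 *	...
 *	(void) thread_soft_bind_cluster_id(current_thread(),
 *	    THREAD_BOUND_CLUSTER_NONE, 0);
 */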
10688 
10689 #if DEVELOPMENT || DEBUG
10690 extern int32_t sysctl_get_bound_cpuid(void);
10691 int32_t
10692 sysctl_get_bound_cpuid(void)
10693 {
10694 	int32_t cpuid = -1;
10695 	thread_t self = current_thread();
10696 
10697 	processor_t processor = self->bound_processor;
10698 	if (processor == NULL) {
10699 		cpuid = -1;
10700 	} else {
10701 		cpuid = processor->cpu_id;
10702 	}
10703 
10704 	return cpuid;
10705 }
10706 
10707 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
10708 kern_return_t
10709 sysctl_thread_bind_cpuid(int32_t cpuid)
10710 {
10711 	processor_t processor = PROCESSOR_NULL;
10712 
10713 	if (cpuid == -1) {
10714 		goto unbind;
10715 	}
10716 
10717 	if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
10718 		return KERN_INVALID_VALUE;
10719 	}
10720 
10721 	processor = processor_array[cpuid];
10722 	if (processor == PROCESSOR_NULL) {
10723 		return KERN_INVALID_VALUE;
10724 	}
10725 
10726 unbind:
10727 	thread_bind(processor);
10728 
10729 	thread_block(THREAD_CONTINUE_NULL);
10730 	return KERN_SUCCESS;
10731 }
10732 
10733 #if __AMP__
10734 static char
10735 pset_cluster_type_name_char(pset_cluster_type_t pset_type)
10736 {
10737 	switch (pset_type) {
10738 	case PSET_AMP_E:
10739 		return 'E';
10740 	case PSET_AMP_P:
10741 		return 'P';
10742 	default:
10743 		panic("Unexpected AMP pset cluster type %d", pset_type);
10744 	}
10745 }
10746 #endif /* __AMP__ */
10747 
10748 extern char sysctl_get_task_cluster_type(void);
10749 char
10750 sysctl_get_task_cluster_type(void)
10751 {
10752 #if __AMP__
10753 	task_t task = current_task();
10754 	processor_set_t pset_hint = task->pset_hint;
10755 
10756 	if (!pset_hint) {
10757 		return '0';
10758 	}
10759 	return pset_cluster_type_name_char(pset_hint->pset_cluster_type);
10760 #else /* !__AMP__ */
10761 	return '0';
10762 #endif /* __AMP__ */
10763 }
10764 
10765 #if __AMP__
10766 extern char sysctl_get_bound_cluster_type(void);
10767 char
10768 sysctl_get_bound_cluster_type(void)
10769 {
10770 	thread_t self = current_thread();
10771 
10772 	if (self->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) {
10773 		return '0';
10774 	}
10775 	pset_cluster_type_t pset_type = pset_array[self->th_bound_cluster_id]->pset_cluster_type;
10776 	return pset_cluster_type_name_char(pset_type);
10777 }
10778 
10779 static processor_set_t
10780 find_pset_of_type(pset_cluster_type_t t)
10781 {
10782 	for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
10783 		if (node->pset_cluster_type != t) {
10784 			continue;
10785 		}
10786 
10787 		processor_set_t pset = PROCESSOR_SET_NULL;
10788 		for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
10789 			pset = pset_array[pset_id];
10790 			/* Prefer one with recommended processors */
10791 			if (pset_is_recommended(pset)) {
10792 				assert(pset->pset_cluster_type == t);
10793 				return pset;
10794 			}
10795 		}
10796 		/* Otherwise return whatever was found last */
10797 		return pset;
10798 	}
10799 
10800 	return PROCESSOR_SET_NULL;
10801 }
10802 #endif /* __AMP__ */
10803 
10804 extern void sysctl_task_set_cluster_type(char cluster_type);
10805 void
10806 sysctl_task_set_cluster_type(char cluster_type)
10807 {
10808 	task_t task = current_task();
10809 	processor_set_t pset_hint = PROCESSOR_SET_NULL;
10810 
10811 #if __AMP__
10812 	switch (cluster_type) {
10813 	case 'e':
10814 	case 'E':
10815 		pset_hint = find_pset_of_type(PSET_AMP_E);
10816 		break;
10817 	case 'p':
10818 	case 'P':
10819 		pset_hint = find_pset_of_type(PSET_AMP_P);
10820 		break;
10821 	default:
10822 		break;
10823 	}
10824 
10825 	if (pset_hint) {
10826 		task_lock(task);
10827 		task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
10828 		task->pset_hint = pset_hint;
10829 		task_unlock(task);
10830 
10831 		thread_block(THREAD_CONTINUE_NULL);
10832 	}
10833 #else
10834 	(void)cluster_type;
10835 	(void)task;
10836 	(void)pset_hint;
10837 #endif
10838 }
10839 
10840 #endif /* DEVELOPMENT || DEBUG */
10841