/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:   priority.c
 * Author: Avadis Tevanian, Jr.
 * Date:   1986
 *
 * Priority related scheduler bits.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <kern/monotonic.h>
#include <machine/machparam.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/sched_clutch.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif

/*
 * thread_quantum_expire:
 *
 * Recalculate the quantum and priority for a thread.
 *
 * Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t p0,
	timer_call_param_t p1)
{
	processor_t processor = p0;
	thread_t thread = p1;
	ast_t preempt;
	uint64_t ctime;

	assert(processor == current_processor());
	assert(thread == current_thread());

	KDBG_RELEASE(MACHDBG_CODE(
	    DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START);

	SCHED_STATS_INC(quantum_timer_expirations);

	/*
	 * We bill CPU time to both the individual thread and its task.
	 *
	 * Because this balance adjustment could potentially attempt to wake this
	 * very thread, we must credit the ledger before taking the thread lock.
	 * The ledger pointers are only manipulated by the thread itself at the ast
	 * boundary.
	 *
	 * TODO: This fails to account for the time between when the timer was
	 * armed and when it fired. It should be based on the system_timer and
	 * running a timer_update operation here.
	 */
	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
	if (thread->t_bankledger) {
		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
		    (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
	}
	thread->t_deduct_bank_ledger_time = 0;

	struct recount_snap snap = { 0 };
	recount_snapshot(&snap);
	ctime = snap.rsn_time_mach;
	check_monotonic_time(ctime);
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif /* CONFIG_MACH_APPROXIMATE_TIME */

	sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);

	recount_switch_thread(&snap, thread, get_threadtask(thread));
	recount_log_switch_thread(&snap);

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	processor->last_dispatch = ctime;
	thread->last_run_time = ctime;

	/*
	 * Check for fail-safe trip.
	 */
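	/*
	 * (Descriptive note: the fail-safe demotes a realtime or fixed-priority
	 * thread that has been computing for longer than its
	 * max_unsafe_*_computation limit down to timeshare; it runs demoted
	 * until safe_release passes, at which point update_priority() below
	 * un-demotes it via sched_thread_mode_undemote().)
	 */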
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->kern_promotion_schedpri != 0) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = ctime - thread->computation_epoch;
		new_computation += thread->computation_metered;
		/*
		 * Remove any time spent handling interrupts outside of the thread's
		 * control.
		 */
		new_computation -= recount_current_thread_interrupt_time_mach() - thread->computation_interrupt_epoch;

		bool demote = false;
		switch (thread->sched_mode) {
		case TH_MODE_REALTIME:
			if (new_computation > max_unsafe_rt_computation) {
				thread->safe_release = ctime + sched_safe_rt_duration;
				demote = true;
			}
			break;
		case TH_MODE_FIXED:
			if (new_computation > max_unsafe_fixed_computation) {
				thread->safe_release = ctime + sched_safe_fixed_duration;
				demote = true;
			}
			break;
		default:
			panic("unexpected mode: %d", thread->sched_mode);
		}

		if (demote) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
			    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
		}
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread)) {
		SCHED(update_priority)(thread);
	} else {
		SCHED(lightweight_update_priority)(thread);
	}

	if (thread->sched_mode != TH_MODE_REALTIME) {
		SCHED(quantum_expire)(thread);
	}

	/*
	 * This quantum is up, give this thread another.
	 */
	processor->first_timeslice = FALSE;

	thread_quantum_init(thread, ctime);

	timer_update(&thread->runnable_timer, ctime);

	processor->quantum_end = ctime + thread->quantum_remaining;

	/*
	 * Context switch check
	 *
	 * non-urgent flags don't affect kernel threads, so upgrade to urgent
	 * to ensure that rebalancing and non-recommendation kick in quickly.
	 */

	ast_t check_reason = AST_QUANTUM;
	if (get_threadtask(thread) == kernel_task) {
		check_reason |= AST_URGENT;
	}

	if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
		ast_on(preempt);
	}

	/*
	 * AST_KEVENT does not send an IPI when it sets the AST. To avoid
	 * waiting for the next context switch to propagate that AST, it is
	 * propagated here at quantum expiration.
	 */
	ast_propagate(thread);

	thread_unlock(thread);

	/* Now that the processor->thread_timer has been updated, evaluate whether
	 * the workqueue quantum has expired and set AST_KEVENT if it has. */
	if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
		thread_evaluate_workqueue_quantum_expiry(thread);
	}

	running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
	    processor->quantum_end, ctime);

	/* Tell platform layer that we are still running this thread */
	thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
	machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
	machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
	    0, thread);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	sched_timeshare_consider_maintenance(ctime, false);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if __arm64__
	if (thread->sched_mode == TH_MODE_REALTIME) {
		sched_consider_recommended_cores(ctime, thread);
	}
#endif /* __arm64__ */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
}

/*
 * sched_set_thread_base_priority:
 *
 * Set the base priority of the thread
 * and reset its scheduled priority.
 *
 * This is the only path to change base_pri.
 *
 * Called with the thread locked.
 */
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
	assert(priority >= MINPRI);
	uint64_t ctime = 0;

	if (thread->sched_mode == TH_MODE_REALTIME) {
		assert((priority >= BASEPRI_RTQUEUES) && (priority <= MAXPRI));
	} else {
		assert(priority < BASEPRI_RTQUEUES);
	}

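	/*
	 * (Descriptive note: while the base priority is frozen via
	 * TH_SFLAG_BASE_PRI_FROZEN, the requested value is remembered in
	 * req_base_pri, but the effective base_pri is only allowed to rise,
	 * never to drop below its current value.)
	 */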
	int old_base_pri = thread->base_pri;
	thread->req_base_pri = (int16_t)priority;
	if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
		priority = MAX(priority, old_base_pri);
	}
	thread->base_pri = (int16_t)priority;

	if ((thread->state & TH_RUN) == TH_RUN) {
		assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
		ctime = mach_approximate_time();
		thread->last_basepri_change_time = ctime;
	} else {
		assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
		assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
	}

	/*
	 * Currently the perfcontrol_attr depends on the base pri of the
	 * thread. Therefore, we use this function as the hook for the
	 * perfcontrol callout.
	 */
	if (thread == current_thread() && old_base_pri != priority) {
		if (!ctime) {
			ctime = mach_approximate_time();
		}
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
	}
#if !CONFIG_SCHED_CLUTCH
	/* For the clutch scheduler, this operation is done in set_sched_pri() */
	SCHED(update_thread_bucket)(thread);
#endif /* !CONFIG_SCHED_CLUTCH */

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/*
 * sched_set_kernel_thread_priority:
 *
 * Set the absolute base priority of the thread
 * and reset its scheduled priority.
 *
 * Called with the thread unlocked.
 */
void
sched_set_kernel_thread_priority(thread_t thread, int new_priority)
{
	spl_t s = splsched();

	thread_lock(thread);

	assert(thread->sched_mode != TH_MODE_REALTIME);
	assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);

	if (new_priority > thread->max_priority) {
		new_priority = thread->max_priority;
	}
#if !defined(XNU_TARGET_OS_OSX)
	if (new_priority < MAXPRI_THROTTLE) {
		new_priority = MAXPRI_THROTTLE;
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	thread->importance = new_priority - thread->task_priority;

	sched_set_thread_base_priority(thread, new_priority);

	thread_unlock(thread);
	splx(s);
}

/*
 * thread_recompute_sched_pri:
 *
 * Reset the scheduled priority of the thread
 * according to its base priority if the
 * thread has not been promoted or depressed.
 *
 * This is the only way to push base_pri changes into sched_pri,
 * or to recalculate the appropriate sched_pri after changing
 * a promotion or depression.
 *
 * Called at splsched with the thread locked.
 *
 * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
 */
void
thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
	uint32_t sched_flags = thread->sched_flags;
	sched_mode_t sched_mode = thread->sched_mode;

	int16_t priority = thread->base_pri;

	if (sched_mode == TH_MODE_TIMESHARE) {
		priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
	}

	if (sched_flags & TH_SFLAG_DEPRESS) {
		/* thread_yield_internal overrides kernel mutex promotion */
		priority = DEPRESSPRI;
	} else {
		/* poll-depress is overridden by mutex promotion and promote-reasons */
		if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
			priority = DEPRESSPRI;
		}

		if (thread->kern_promotion_schedpri > 0) {
			priority = MAX(priority, thread->kern_promotion_schedpri);

			if (sched_mode != TH_MODE_REALTIME) {
				priority = MIN(priority, MAXPRI_PROMOTE);
			}
		}

		if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
			if (sched_flags & TH_SFLAG_RW_PROMOTED) {
				priority = MAX(priority, MINPRI_RWLOCK);
			}

			if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
				priority = MAX(priority, MINPRI_WAITQ);
			}

			if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
				priority = MAX(priority, MINPRI_EXEC);
			}

			if (sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
				priority = MAX(priority, MINPRI_FLOOR);
			}
		}
	}

	set_sched_pri(thread, priority, options);
}

void
sched_default_quantum_expire(thread_t thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum
	 */
}

int smt_timeshare_enabled = 1;
int smt_sched_bonus_16ths = 8;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * lightweight_update_priority:
 *
 * Update the scheduled priority for
 * a timesharing thread.
 *
 * Only for use on the current thread.
 *
 * Called with the thread locked.
 */
void
lightweight_update_priority(thread_t thread)
{
	thread_assert_runq_null(thread);
	assert(thread == current_thread());

	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority;
		uint32_t delta;

		sched_tick_delta(thread, delta);

		/*
		 * Accumulate timesharing usage only
		 * during contention for processor
		 * resources.
		 */
		if (thread->pri_shift < INT8_MAX) {
#if CONFIG_SCHED_SMT
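			/*
			 * (Descriptive note: this is the no-SMT usage bonus. With the
			 * default smt_sched_bonus_16ths of 8, a thread that must not
			 * share its core with an SMT sibling accrues an extra
			 * (delta * 8) >> 4, i.e. 50% more timesharing usage for the
			 * same CPU time.)
			 */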
			if (thread_no_smt(thread) && smt_timeshare_enabled) {
				thread->sched_usage += ((delta * smt_sched_bonus_16ths) >> 4);
			}
#endif /* CONFIG_SCHED_SMT */
			thread->sched_usage += delta;
		}

		thread->cpu_delta += delta;

#if CONFIG_SCHED_CLUTCH
		/*
		 * Update the CPU usage for the thread group to which the thread belongs.
		 * The implementation assumes that the thread ran for the entire delta
		 * as part of the same thread group.
		 */
		sched_clutch_cpu_usage_update(thread, delta);
#endif /* CONFIG_SCHED_CLUTCH */

		priority = sched_compute_timeshare_priority(thread);

		if (priority != thread->sched_pri) {
			thread_recompute_sched_pri(thread, SETPRI_LAZY);
		}
	}
}

/*
 * Define shifts for simulating (5/8) ** n
 *
 * Shift structures for holding update shifts. The actual computation
 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)), where the
 * +/- is determined by the sign of shift2.
 */
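/*
 * (Worked example: update_priority() below indexes this table by the number
 * of elapsed scheduler ticks. For ticks == 1, { .shift1 = 1, .shift2 = 3 }
 * gives usage/2 + usage/8 = 0.625 * usage, exactly (5/8)^1; for ticks == 2,
 * { .shift1 = 1, .shift2 = -3 } gives usage/2 - usage/8 = 0.375 * usage,
 * approximating (5/8)^2 = 0.390625.)
 */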

const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{ .shift1 = 1, .shift2 = 1 },
	{ .shift1 = 1, .shift2 = 3 },
	{ .shift1 = 1, .shift2 = -3 },
	{ .shift1 = 2, .shift2 = -7 },
	{ .shift1 = 3, .shift2 = 5 },
	{ .shift1 = 3, .shift2 = -5 },
	{ .shift1 = 4, .shift2 = -8 },
	{ .shift1 = 5, .shift2 = 7 },
	{ .shift1 = 5, .shift2 = -7 },
	{ .shift1 = 6, .shift2 = -10 },
	{ .shift1 = 7, .shift2 = 10 },
	{ .shift1 = 7, .shift2 = -9 },
	{ .shift1 = 8, .shift2 = -11 },
	{ .shift1 = 9, .shift2 = 12 },
	{ .shift1 = 9, .shift2 = -11 },
	{ .shift1 = 10, .shift2 = -13 },
	{ .shift1 = 11, .shift2 = 14 },
	{ .shift1 = 11, .shift2 = -13 },
	{ .shift1 = 12, .shift2 = -15 },
	{ .shift1 = 13, .shift2 = 17 },
	{ .shift1 = 13, .shift2 = -15 },
	{ .shift1 = 14, .shift2 = -17 },
	{ .shift1 = 15, .shift2 = 19 },
	{ .shift1 = 16, .shift2 = 18 },
	{ .shift1 = 16, .shift2 = -19 },
	{ .shift1 = 17, .shift2 = 22 },
	{ .shift1 = 18, .shift2 = 20 },
	{ .shift1 = 18, .shift2 = -20 },
	{ .shift1 = 19, .shift2 = 26 },
	{ .shift1 = 20, .shift2 = 22 },
	{ .shift1 = 20, .shift2 = -22 },
	{ .shift1 = 21, .shift2 = -27 }
};

/*
 * sched_compute_timeshare_priority:
 *
 * Calculate the timesharing priority based upon usage and load.
 */
extern int sched_pri_decay_band_limit;

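/*
 * (Sketch of both variants below: priority = base_pri - (sched_usage >>
 * pri_shift), clamped into the [MINPRI_USER, MAXPRI_KERNEL] range. A
 * pri_shift of INT8_MAX means there was no contention for the thread's
 * bucket, so no usage is subtracted at all.)
 */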

/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH

int
sched_compute_timeshare_priority(thread_t thread)
{
	int decay_amount;
	int decay_limit = sched_pri_decay_band_limit;

	if (thread->base_pri > BASEPRI_FOREGROUND) {
		decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
	}

	if (thread->pri_shift == INT8_MAX) {
		decay_amount = 0;
	} else {
		decay_amount = (thread->sched_usage >> thread->pri_shift);
	}

	if (decay_amount > decay_limit) {
		decay_amount = decay_limit;
	}

	/* start with base priority */
	int priority = thread->base_pri - decay_amount;

	if (priority < MAXPRI_THROTTLE) {
		if (get_threadtask(thread)->max_priority > MAXPRI_THROTTLE) {
			priority = MAXPRI_THROTTLE;
		} else if (priority < MINPRI_USER) {
			priority = MINPRI_USER;
		}
	} else if (priority > MAXPRI_KERNEL) {
		priority = MAXPRI_KERNEL;
	}

	return priority;
}

#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */

int
sched_compute_timeshare_priority(thread_t thread)
{
	/* start with base priority */
	int priority = thread->base_pri;

	if (thread->pri_shift != INT8_MAX) {
		priority -= (thread->sched_usage >> thread->pri_shift);
	}

	if (priority < MINPRI_USER) {
		priority = MINPRI_USER;
	} else if (priority > MAXPRI_KERNEL) {
		priority = MAXPRI_KERNEL;
	}

	return priority;
}

#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */

/*
 * can_update_priority
 *
 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 * Called with the thread locked.
 */
boolean_t
can_update_priority(
	thread_t thread)
{
	if (sched_tick == thread->sched_stamp) {
		return FALSE;
	} else {
		return TRUE;
	}
}

/*
 * update_priority
 *
 * Perform housekeeping operations driven by scheduler tick.
 *
 * Called with the thread locked.
 */
void
update_priority(
	thread_t thread)
{
	uint32_t ticks, delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	thread->sched_stamp += ticks;

	/* If requested, accelerate aging of sched_usage */
	if (sched_decay_usage_age_factor > 1) {
		ticks *= sched_decay_usage_age_factor;
	}

	/*
	 * Gather cpu usage data.
	 */
	sched_tick_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		/*
		 * Accumulate timesharing usage only during contention for processor
		 * resources. Use the pri_shift from the previous tick window to
		 * determine if the system was in a contended state.
		 */
		if (thread->pri_shift < INT8_MAX) {
#if CONFIG_SCHED_SMT
			if (thread_no_smt(thread) && smt_timeshare_enabled) {
				thread->sched_usage += ((delta * smt_sched_bonus_16ths) >> 4);
			}
#endif /* CONFIG_SCHED_SMT */
			thread->sched_usage += delta;
		}

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

#if CONFIG_SCHED_CLUTCH
		/*
		 * Update the CPU usage for the thread group to which the thread belongs.
		 * The implementation assumes that the thread ran for the entire delta
		 * as part of the same thread group.
		 */
		sched_clutch_cpu_usage_update(thread, delta);
#endif /* CONFIG_SCHED_CLUTCH */

		const struct shift_data *shiftp = &sched_decay_shifts[ticks];

		if (shiftp->shift2 > 0) {
			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
			    (thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
			    (thread->sched_usage >> shiftp->shift2);
		} else {
			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
			    (thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
			    (thread->sched_usage >> -(shiftp->shift2));
		}
	} else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}

	/*
	 * Check for fail-safe release.
	 */
	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	    mach_absolute_time() >= thread->safe_release) {
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
	}

	/*
	 * Now that the thread's CPU usage has been accumulated and aged
	 * based on contention of the previous tick window, update the
	 * pri_shift of the thread to match the current global load/shift
	 * values. The updated pri_shift would be used to calculate the
	 * new priority of the thread.
	 */
#if CONFIG_SCHED_CLUTCH
	thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
#else /* CONFIG_SCHED_CLUTCH */
	thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
#endif /* CONFIG_SCHED_CLUTCH */

	/* Recompute scheduled priority if appropriate. */
	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		thread_recompute_sched_pri(thread, SETPRI_LAZY);
	}
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */


/*
 * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
 * Each other bucket is a count of the runnable non-idle threads
 * with that property. All updates to these counts should be
 * performed with os_atomic_* operations.
 *
 * For the clutch scheduler, this global bucket is used only for
 * keeping the total global run count.
 */
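/*
 * (Descriptive note: every increment or decrement below updates both
 * TH_BUCKET_RUN and the thread's own bucket by the same amount, so the
 * per-bucket counts should always sum to the TH_BUCKET_RUN total.)
 */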
uint32_t sched_run_buckets[TH_BUCKET_MAX];

static void
sched_incr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	os_atomic_inc(&sched_run_buckets[bucket], relaxed);
}

static void
sched_decr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);

	os_atomic_dec(&sched_run_buckets[bucket], relaxed);
}

static void
sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
}

static void
sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);

	os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
}

uint32_t
sched_run_incr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);

	uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);

	sched_incr_bucket(thread->th_sched_bucket);

	return new_count;
}

uint32_t
sched_run_decr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);

	sched_decr_bucket(thread->th_sched_bucket);

	uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);

	return new_count;
}

uint32_t
sched_smt_run_incr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);

#if CONFIG_SCHED_SMT
	uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
#else /* CONFIG_SCHED_SMT */
	uint8_t run_weight = 1;
#endif /* CONFIG_SCHED_SMT */
	thread->sched_saved_run_weight = run_weight;

	uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);

	sched_add_bucket(thread->th_sched_bucket, run_weight);

	return new_count;
}

uint32_t
sched_smt_run_decr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);

	uint8_t run_weight = thread->sched_saved_run_weight;

	sched_sub_bucket(thread->th_sched_bucket, run_weight);

	uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);

	return new_count;
}

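/*
 * Map a thread's scheduling mode and base priority onto a run bucket:
 * fixed and realtime threads land in TH_BUCKET_FIXPRI, while timeshare
 * threads are binned by base_pri into the foreground, default, utility,
 * or background share buckets.
 */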
void
sched_update_thread_bucket(thread_t thread)
{
	sched_bucket_t old_bucket = thread->th_sched_bucket;
	sched_bucket_t new_bucket = TH_BUCKET_RUN;

	switch (thread->sched_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		new_bucket = TH_BUCKET_FIXPRI;
		break;

	case TH_MODE_TIMESHARE:
		if (thread->base_pri > BASEPRI_DEFAULT) {
			new_bucket = TH_BUCKET_SHARE_FG;
		} else if (thread->base_pri > BASEPRI_UTILITY) {
			new_bucket = TH_BUCKET_SHARE_DF;
		} else if (thread->base_pri > MAXPRI_THROTTLE) {
			new_bucket = TH_BUCKET_SHARE_UT;
		} else {
			new_bucket = TH_BUCKET_SHARE_BG;
		}
		break;

	default:
		panic("unexpected mode: %d", thread->sched_mode);
		break;
	}

	if (old_bucket != new_bucket) {
		thread->th_sched_bucket = new_bucket;
		thread->pri_shift = sched_pri_shifts[new_bucket];

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			sched_decr_bucket(old_bucket);
			sched_incr_bucket(new_bucket);
		}
	}
}

void
sched_smt_update_thread_bucket(thread_t thread)
{
	sched_bucket_t old_bucket = thread->th_sched_bucket;
	sched_bucket_t new_bucket = TH_BUCKET_RUN;

	switch (thread->sched_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		new_bucket = TH_BUCKET_FIXPRI;
		break;

	case TH_MODE_TIMESHARE:
		if (thread->base_pri > BASEPRI_DEFAULT) {
			new_bucket = TH_BUCKET_SHARE_FG;
		} else if (thread->base_pri > BASEPRI_UTILITY) {
			new_bucket = TH_BUCKET_SHARE_DF;
		} else if (thread->base_pri > MAXPRI_THROTTLE) {
			new_bucket = TH_BUCKET_SHARE_UT;
		} else {
			new_bucket = TH_BUCKET_SHARE_BG;
		}
		break;

	default:
		panic("unexpected mode: %d", thread->sched_mode);
		break;
	}

	if (old_bucket != new_bucket) {
		thread->th_sched_bucket = new_bucket;
		thread->pri_shift = sched_pri_shifts[new_bucket];

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
			sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
		}
	}
}

static inline void
sched_validate_mode(sched_mode_t mode)
{
	switch (mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
	case TH_MODE_TIMESHARE:
		break;

	default:
		panic("unexpected mode: %d", mode);
		break;
	}
}

/*
 * Set the thread's true scheduling mode
 * Called with thread mutex and thread locked
 * The thread has already been removed from the runqueue.
 *
 * (saved_mode is handled before this point)
 */
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
	thread_assert_runq_null(thread);

	sched_validate_mode(new_mode);

#if CONFIG_SCHED_AUTO_JOIN
	/*
	 * Realtime threads might have auto-joined a work interval based on
	 * make runnable relationships. If such an RT thread is now being demoted
	 * to non-RT, unjoin the thread from the work interval.
	 */
	if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
		assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
		work_interval_auto_join_demote(thread);
	}
#endif /* CONFIG_SCHED_AUTO_JOIN */

	thread->sched_mode = new_mode;

	SCHED(update_thread_bucket)(thread);
}

/*
 * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
 * That way there's zero confusion over which the user wants
 * and which the kernel wants.
 */
void
sched_set_thread_mode_user(thread_t thread, sched_mode_t new_mode)
{
	thread_assert_runq_null(thread);

	sched_validate_mode(new_mode);

	/* If demoted, only modify the saved mode. */
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		thread->saved_mode = new_mode;
	} else {
		sched_set_thread_mode(thread, new_mode);
	}
}

sched_mode_t
sched_get_thread_mode_user(thread_t thread)
{
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		return thread->saved_mode;
	} else {
		return thread->sched_mode;
	}
}

/*
 * Demote the true scheduler mode to timeshare (called with the thread locked)
 */
void
sched_thread_mode_demote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) != reason);

	if (thread->policy_reset) {
		return;
	}

	switch (reason) {
	case TH_SFLAG_THROTTLED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_THROTTLED),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_FAILSAFE:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_FAILSAFE),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_RT_DISALLOWED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_RT_DISALLOWED),
		    thread_tid(thread), thread->sched_flags);
		break;
	}

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is already active */
		thread->sched_flags |= reason;
		return;
	}

	assert(thread->saved_mode == TH_MODE_NONE);

	boolean_t removed = thread_run_queue_remove(thread);

	thread->sched_flags |= reason;

	thread->saved_mode = thread->sched_mode;

	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);

	thread_recompute_priority(thread);

	if (removed) {
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
	}
}

/*
 * Return true if the thread is demoted for the specified reason
 */
bool
sched_thread_mode_has_demotion(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	return (thread->sched_flags & reason) != 0;
}

/*
 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
 */
void
sched_thread_mode_undemote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) == reason);
	assert(thread->saved_mode != TH_MODE_NONE);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->policy_reset == 0);

	switch (reason) {
	case TH_SFLAG_THROTTLED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_THROTTLED),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_FAILSAFE:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_FAILSAFE),
		    thread_tid(thread), thread->sched_flags);
		/* re-arm failsafe reporting mechanism */
		thread->sched_flags &= ~TH_SFLAG_FAILSAFE_REPORTED;
		break;
	case TH_SFLAG_RT_DISALLOWED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_RT_DISALLOWED),
		    thread_tid(thread), thread->sched_flags);
		break;
	}

	thread->sched_flags &= ~reason;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is still active */
		return;
	}

	boolean_t removed = thread_run_queue_remove(thread);

	sched_set_thread_mode(thread, thread->saved_mode);

	thread->saved_mode = TH_MODE_NONE;

	thread_recompute_priority(thread);

	if (removed) {
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
	}
}

/*
 * Promote thread to have a sched pri floor for a specific reason
 *
 * Promotion must not last past the syscall boundary.
 * Clients must always pair promote and demote 1:1;
 * handling nesting of the same promote reason is the client's responsibility.
 *
 * Called at splsched with thread locked
 */
void
sched_thread_promote_reason(thread_t thread,
    uint32_t reason,
    __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) != reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_FLOOR_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags |= reason;
	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/*
 * End a specific promotion reason
 * Demotes a thread back to its expected priority without the promotion in place
 *
 * Called at splsched with thread locked
 */
void
sched_thread_unpromote_reason(thread_t thread,
    uint32_t reason,
    __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) == reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_FLOOR_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags &= ~reason;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}