1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: priority.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Priority related scheduler bits.
64 */
65
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <mach/machine.h>
69 #include <kern/host.h>
70 #include <kern/mach_param.h>
71 #include <kern/sched.h>
72 #include <sys/kdebug.h>
73 #include <kern/spl.h>
74 #include <kern/thread.h>
75 #include <kern/processor.h>
76 #include <kern/ledger.h>
77 #include <machine/machparam.h>
78 #include <kern/machine.h>
79 #include <kern/policy_internal.h>
80 #include <kern/sched_clutch.h>
81
82 #ifdef CONFIG_MACH_APPROXIMATE_TIME
83 #include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
84 #endif
85
86 #if MONOTONIC
87 #include <kern/monotonic.h>
88 #endif /* MONOTONIC */
89
90 /*
91 * thread_quantum_expire:
92 *
93 * Recalculate the quantum and priority for a thread.
94 *
95 * Called at splsched.
96 */
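/*
 * Overview of the steps below: bill the expired quantum to the thread/task
 * (and bank) ledgers, update approximate time and the per-pset execution-time
 * average, check the fixed/realtime fail-safe, recompute the scheduled
 * priority, grant a fresh quantum, check whether a context switch is needed,
 * re-arm the quantum timer, and tell the platform layer the thread stays
 * on core.
 */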
97
98 void
99 thread_quantum_expire(
100 timer_call_param_t p0,
101 timer_call_param_t p1)
102 {
103 processor_t processor = p0;
104 thread_t thread = p1;
105 ast_t preempt;
106 uint64_t ctime;
107
108 assert(processor == current_processor());
109 assert(thread == current_thread());
110
111 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0);
112
113 SCHED_STATS_INC(quantum_timer_expirations);
114
115 /*
116 * We bill CPU time to both the individual thread and its task.
117 *
118 * Because this balance adjustment could potentially attempt to wake this
119 * very thread, we must credit the ledger before taking the thread lock.
120 * The ledger pointers are only manipulated by the thread itself at the ast
121 * boundary.
122 *
123 * TODO: This fails to account for the time between when the timer was
124 * armed and when it fired. It should be based on the system_timer and
125 * running a timer_update operation here.
126 */
127 ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
128 ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
129 if (thread->t_bankledger) {
130 ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
131 (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
132 }
133 thread->t_deduct_bank_ledger_time = 0;
134
135 ctime = mach_absolute_time();
136
137 check_monotonic_time(ctime);
138
139 #ifdef CONFIG_MACH_APPROXIMATE_TIME
140 commpage_update_mach_approximate_time(ctime);
141 #endif
142 sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);
143
144 #if MONOTONIC
145 mt_sched_update(thread);
146 #endif /* MONOTONIC */
147
148 thread_lock(thread);
149
150 /*
151 * We've run up until our quantum expiration, and will (potentially)
152 * continue without re-entering the scheduler, so update this now.
153 */
154 processor->last_dispatch = ctime;
155 thread->last_run_time = ctime;
156
157 /*
158 * Check for fail-safe trip.
159 */
160 if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
161 !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
162 !(thread->kern_promotion_schedpri != 0) &&
163 !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
164 !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
165 uint64_t new_computation;
166
167 new_computation = ctime - thread->computation_epoch;
168 new_computation += thread->computation_metered;
169 if (new_computation > max_unsafe_computation) {
170 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
171 (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
172
173 thread->safe_release = ctime + sched_safe_duration;
174
175 sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
176 }
177 }
178
179 /*
180 * Recompute scheduled priority if appropriate.
181 */
182 if (SCHED(can_update_priority)(thread)) {
183 SCHED(update_priority)(thread);
184 } else {
185 SCHED(lightweight_update_priority)(thread);
186 }
187
188 if (thread->sched_mode != TH_MODE_REALTIME) {
189 SCHED(quantum_expire)(thread);
190 }
191
192 /*
193 * This quantum is up; give this thread another.
194 */
195 processor->first_timeslice = FALSE;
196
197 thread_quantum_init(thread);
198
199 /* Reload precise timing global policy to thread-local policy */
200 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
201
202 /*
203 * Since non-precise user/kernel time doesn't update the state/thread timer
204 * during privilege transitions, synthesize an event now.
205 */
206 if (!thread->precise_user_kernel_time) {
207 timer_update(processor->current_state, ctime);
208 timer_update(processor->thread_timer, ctime);
209 timer_update(&thread->runnable_timer, ctime);
210 }
211
212
213 processor->quantum_end = ctime + thread->quantum_remaining;
214
215 /*
216 * Context switch check
217 *
218 * non-urgent flags don't affect kernel threads, so upgrade to urgent
219 * to ensure that rebalancing and non-recommendation kick in quickly.
220 */
221
222 ast_t check_reason = AST_QUANTUM;
223 if (get_threadtask(thread) == kernel_task) {
224 check_reason |= AST_URGENT;
225 }
226
227 if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
228 ast_on(preempt);
229 }
230
231 /*
232 * AST_KEVENT does not send an IPI when the AST is set. To avoid
233 * waiting for the next context switch to propagate the AST,
234 * it is propagated here at quantum expiration.
235 */
236 ast_propagate(thread);
237
238 thread_unlock(thread);
239
240 /* Now that the processor->thread_timer has been updated, evaluate to see if
241 * the workqueue quantum expired and set AST_KEVENT if it has */
242 if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
243 thread_evaluate_workqueue_quantum_expiry(thread);
244 }
245
246 running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
247 processor->quantum_end, ctime);
248
249 /* Tell platform layer that we are still running this thread */
250 thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
251 machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
252 machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
253 0, thread);
254
255 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
256 sched_timeshare_consider_maintenance(ctime);
257 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
258
259 #if __arm__ || __arm64__
260 if (thread->sched_mode == TH_MODE_REALTIME) {
261 sched_consider_recommended_cores(ctime, thread);
262 }
263 #endif /* __arm__ || __arm64__ */
264
265 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
266 }
267
268 /*
269 * sched_set_thread_base_priority:
270 *
271 * Set the base priority of the thread
272 * and reset its scheduled priority.
273 *
274 * This is the only path to change base_pri.
275 *
276 * Called with the thread locked.
277 */
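/*
 * Illustrative call sequence (a sketch, not a required pattern): a caller
 * that does not already hold the thread lock takes splsched and the lock
 * first, as sched_set_kernel_thread_priority() below does:
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	sched_set_thread_base_priority(thread, BASEPRI_DEFAULT);
 *	thread_unlock(thread);
 *	splx(s);
 */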
278 void
279 sched_set_thread_base_priority(thread_t thread, int priority)
280 {
281 assert(priority >= MINPRI);
282 uint64_t ctime = 0;
283
284 if (thread->sched_mode == TH_MODE_REALTIME) {
285 assert((priority >= BASEPRI_RTQUEUES) && (priority <= MAXPRI));
286 } else {
287 assert(priority < BASEPRI_RTQUEUES);
288 }
289
290 int old_base_pri = thread->base_pri;
291 thread->req_base_pri = (int16_t)priority;
292 if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
293 priority = MAX(priority, old_base_pri);
294 }
295 thread->base_pri = (int16_t)priority;
296
297 if ((thread->state & TH_RUN) == TH_RUN) {
298 assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
299 ctime = mach_approximate_time();
300 thread->last_basepri_change_time = ctime;
301 } else {
302 assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
303 assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
304 }
305
306 /*
307 * Currently the perfcontrol_attr depends on the base pri of the
308 * thread. Therefore, we use this function as the hook for the
309 * perfcontrol callout.
310 */
311 if (thread == current_thread() && old_base_pri != priority) {
312 if (!ctime) {
313 ctime = mach_approximate_time();
314 }
315 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
316 ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
317 }
318 #if !CONFIG_SCHED_CLUTCH
319 /* For the clutch scheduler, this operation is done in set_sched_pri() */
320 SCHED(update_thread_bucket)(thread);
321 #endif /* !CONFIG_SCHED_CLUTCH */
322
323 thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
324 }
325
326 /*
327 * sched_set_kernel_thread_priority:
328 *
329 * Set the absolute base priority of the thread
330 * and reset its scheduled priority.
331 *
332 * Called with the thread unlocked.
333 */
334 void
335 sched_set_kernel_thread_priority(thread_t thread, int new_priority)
336 {
337 spl_t s = splsched();
338
339 thread_lock(thread);
340
341 assert(thread->sched_mode != TH_MODE_REALTIME);
342 assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);
343
344 if (new_priority > thread->max_priority) {
345 new_priority = thread->max_priority;
346 }
347 #if !defined(XNU_TARGET_OS_OSX)
348 if (new_priority < MAXPRI_THROTTLE) {
349 new_priority = MAXPRI_THROTTLE;
350 }
351 #endif /* !defined(XNU_TARGET_OS_OSX) */
352
353 thread->importance = new_priority - thread->task_priority;
354
355 sched_set_thread_base_priority(thread, new_priority);
356
357 thread_unlock(thread);
358 splx(s);
359 }
360
361 /*
362 * thread_recompute_sched_pri:
363 *
364 * Reset the scheduled priority of the thread
365 * according to its base priority if the
366 * thread has not been promoted or depressed.
367 *
368 * This is the only way to push base_pri changes into sched_pri,
369 * or to recalculate the appropriate sched_pri after changing
370 * a promotion or depression.
371 *
372 * Called at splsched with the thread locked.
373 *
374 * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
375 */
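/*
 * Summary of the resolution order implemented below: a depress (from
 * thread_yield_internal) forces DEPRESSPRI outright; a poll-depress starts
 * at DEPRESSPRI but can be raised again by promotions; otherwise the
 * priority is the maximum of the (possibly decayed) base priority, any
 * kernel mutex promotion, and any explicit promotion priority (each capped
 * at MAXPRI_PROMOTE for non-realtime threads), raised further by the
 * promote-reason floors (rwlock, waitq, exec, floor).
 */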
376 void
377 thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
378 {
379 uint32_t sched_flags = thread->sched_flags;
380 sched_mode_t sched_mode = thread->sched_mode;
381
382 int16_t priority = thread->base_pri;
383
384 if (sched_mode == TH_MODE_TIMESHARE) {
385 priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
386 }
387
388 if (sched_flags & TH_SFLAG_DEPRESS) {
389 /* thread_yield_internal overrides kernel mutex promotion */
390 priority = DEPRESSPRI;
391 } else {
392 /* poll-depress is overridden by mutex promotion and promote-reasons */
393 if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
394 priority = DEPRESSPRI;
395 }
396
397 if (thread->kern_promotion_schedpri > 0) {
398 priority = MAX(priority, thread->kern_promotion_schedpri);
399
400 if (sched_mode != TH_MODE_REALTIME) {
401 priority = MIN(priority, MAXPRI_PROMOTE);
402 }
403 }
404
405 if (sched_flags & TH_SFLAG_PROMOTED) {
406 priority = MAX(priority, thread->promotion_priority);
407
408 if (sched_mode != TH_MODE_REALTIME) {
409 priority = MIN(priority, MAXPRI_PROMOTE);
410 }
411 }
412
413 if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
414 if (sched_flags & TH_SFLAG_RW_PROMOTED) {
415 priority = MAX(priority, MINPRI_RWLOCK);
416 }
417
418 if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
419 priority = MAX(priority, MINPRI_WAITQ);
420 }
421
422 if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
423 priority = MAX(priority, MINPRI_EXEC);
424 }
425
426 if (sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
427 priority = MAX(priority, MINPRI_FLOOR);
428 }
429 }
430 }
431
432 set_sched_pri(thread, priority, options);
433 }
434
435 void
436 sched_default_quantum_expire(thread_t thread __unused)
437 {
438 /*
439 * No special behavior when a timeshare, fixed, or realtime thread
440 * uses up its entire quantum
441 */
442 }
443
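/*
 * SMT timesharing tunables: when a no-SMT thread runs with SMT timesharing
 * enabled, its usage is charged an extra smt_sched_bonus_16ths/16 (50% with
 * the default of 8) and it is counted with a run weight of 2 instead of 1;
 * see lightweight_update_priority(), update_priority() and
 * sched_smt_run_incr() below.
 */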
444 int smt_timeshare_enabled = 1;
445 int smt_sched_bonus_16ths = 8;
446
447 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
448
449 /*
450 * lightweight_update_priority:
451 *
452 * Update the scheduled priority for
453 * a timesharing thread.
454 *
455 * Only for use on the current thread.
456 *
457 * Called with the thread locked.
458 */
459 void
460 lightweight_update_priority(thread_t thread)
461 {
462 assert(thread->runq == PROCESSOR_NULL);
463 assert(thread == current_thread());
464
465 if (thread->sched_mode == TH_MODE_TIMESHARE) {
466 int priority;
467 uint32_t delta;
468
469 thread_timer_delta(thread, delta);
470
471 /*
472 * Accumulate timesharing usage only
473 * during contention for processor
474 * resources.
475 */
476 if (thread->pri_shift < INT8_MAX) {
477 if (thread_no_smt(thread) && smt_timeshare_enabled) {
478 thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
479 } else {
480 thread->sched_usage += delta;
481 }
482 }
483
484 thread->cpu_delta += delta;
485
486 #if CONFIG_SCHED_CLUTCH
487 /*
488 * Update the CPU usage for the thread group to which the thread belongs.
489 * The implementation assumes that the thread ran for the entire delta
490 * as part of the same thread group.
491 */
492 sched_clutch_cpu_usage_update(thread, delta);
493 #endif /* CONFIG_SCHED_CLUTCH */
494
495 priority = sched_compute_timeshare_priority(thread);
496
497 if (priority != thread->sched_pri) {
498 thread_recompute_sched_pri(thread, SETPRI_LAZY);
499 }
500 }
501 }
502
503 /*
504 * Define shifts for simulating (5/8) ** n
505 *
506 * Shift structures for holding update shifts. Actual computation
507 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
508 * +/- is determined by the sign of shift 2.
509 */
510
511 const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
512 { .shift1 = 1, .shift2 = 1 },
513 { .shift1 = 1, .shift2 = 3 },
514 { .shift1 = 1, .shift2 = -3 },
515 { .shift1 = 2, .shift2 = -7 },
516 { .shift1 = 3, .shift2 = 5 },
517 { .shift1 = 3, .shift2 = -5 },
518 { .shift1 = 4, .shift2 = -8 },
519 { .shift1 = 5, .shift2 = 7 },
520 { .shift1 = 5, .shift2 = -7 },
521 { .shift1 = 6, .shift2 = -10 },
522 { .shift1 = 7, .shift2 = 10 },
523 { .shift1 = 7, .shift2 = -9 },
524 { .shift1 = 8, .shift2 = -11 },
525 { .shift1 = 9, .shift2 = 12 },
526 { .shift1 = 9, .shift2 = -11 },
527 { .shift1 = 10, .shift2 = -13 },
528 { .shift1 = 11, .shift2 = 14 },
529 { .shift1 = 11, .shift2 = -13 },
530 { .shift1 = 12, .shift2 = -15 },
531 { .shift1 = 13, .shift2 = 17 },
532 { .shift1 = 13, .shift2 = -15 },
533 { .shift1 = 14, .shift2 = -17 },
534 { .shift1 = 15, .shift2 = 19 },
535 { .shift1 = 16, .shift2 = 18 },
536 { .shift1 = 16, .shift2 = -19 },
537 { .shift1 = 17, .shift2 = 22 },
538 { .shift1 = 18, .shift2 = 20 },
539 { .shift1 = 18, .shift2 = -20 },
540 { .shift1 = 19, .shift2 = 26 },
541 { .shift1 = 20, .shift2 = 22 },
542 { .shift1 = 20, .shift2 = -22 },
543 { .shift1 = 21, .shift2 = -27 }
544 };
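/*
 * Worked example (derived from the table above): for ticks == 1 the entry
 * { .shift1 = 1, .shift2 = 3 } gives usage/2 + usage/8 = 0.625 * usage,
 * exactly (5/8)^1; for ticks == 2 the entry { .shift1 = 1, .shift2 = -3 }
 * gives usage/2 - usage/8 = 0.375 * usage, approximating (5/8)^2 ~= 0.39.
 */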
545
546 /*
547 * sched_compute_timeshare_priority:
548 *
549 * Calculate the timesharing priority based upon usage and load.
550 */
551 extern int sched_pri_decay_band_limit;
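/*
 * In both variants below the decayed priority is essentially
 *	priority = base_pri - (sched_usage >> pri_shift)
 * clamped into [MINPRI_USER, MAXPRI_KERNEL]; a pri_shift of INT8_MAX means
 * the bucket saw no contention, so no decay is applied. The non-macOS,
 * non-clutch variant additionally limits how far decay can drag the priority
 * below base_pri (roughly sched_pri_decay_band_limit).
 */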
552
553
554 /* Only use the decay floor logic on non-macOS and non-clutch schedulers */
555 #if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH
556
557 int
558 sched_compute_timeshare_priority(thread_t thread)
559 {
560 int decay_amount;
561 int decay_limit = sched_pri_decay_band_limit;
562
563 if (thread->base_pri > BASEPRI_FOREGROUND) {
564 decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
565 }
566
567 if (thread->pri_shift == INT8_MAX) {
568 decay_amount = 0;
569 } else {
570 decay_amount = (thread->sched_usage >> thread->pri_shift);
571 }
572
573 if (decay_amount > decay_limit) {
574 decay_amount = decay_limit;
575 }
576
577 /* start with base priority */
578 int priority = thread->base_pri - decay_amount;
579
580 if (priority < MAXPRI_THROTTLE) {
581 if (get_threadtask(thread)->max_priority > MAXPRI_THROTTLE) {
582 priority = MAXPRI_THROTTLE;
583 } else if (priority < MINPRI_USER) {
584 priority = MINPRI_USER;
585 }
586 } else if (priority > MAXPRI_KERNEL) {
587 priority = MAXPRI_KERNEL;
588 }
589
590 return priority;
591 }
592
593 #else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
594
595 int
596 sched_compute_timeshare_priority(thread_t thread)
597 {
598 /* start with base priority */
599 int priority = thread->base_pri;
600
601 if (thread->pri_shift != INT8_MAX) {
602 priority -= (thread->sched_usage >> thread->pri_shift);
603 }
604
605 if (priority < MINPRI_USER) {
606 priority = MINPRI_USER;
607 } else if (priority > MAXPRI_KERNEL) {
608 priority = MAXPRI_KERNEL;
609 }
610
611 return priority;
612 }
613
614 #endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
615
616 /*
617 * can_update_priority
618 *
619 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
620 *
621 * Called with the thread locked.
622 */
623 boolean_t
624 can_update_priority(
625 thread_t thread)
626 {
627 if (sched_tick == thread->sched_stamp) {
628 return FALSE;
629 } else {
630 return TRUE;
631 }
632 }
633
634 /*
635 * update_priority
636 *
637 * Perform housekeeping operations driven by scheduler tick.
638 *
639 * Called with the thread locked.
640 */
641 void
642 update_priority(
643 thread_t thread)
644 {
645 uint32_t ticks, delta;
646
647 ticks = sched_tick - thread->sched_stamp;
648 assert(ticks != 0);
649
650 thread->sched_stamp += ticks;
651
652 /* If requested, accelerate aging of sched_usage */
653 if (sched_decay_usage_age_factor > 1) {
654 ticks *= sched_decay_usage_age_factor;
655 }
656
657 /*
658 * Gather cpu usage data.
659 */
660 thread_timer_delta(thread, delta);
661 if (ticks < SCHED_DECAY_TICKS) {
662 /*
663 * Accumulate timesharing usage only during contention for processor
664 * resources. Use the pri_shift from the previous tick window to
665 * determine if the system was in a contended state.
666 */
667 if (thread->pri_shift < INT8_MAX) {
668 if (thread_no_smt(thread) && smt_timeshare_enabled) {
669 thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
670 } else {
671 thread->sched_usage += delta;
672 }
673 }
674
675 thread->cpu_usage += delta + thread->cpu_delta;
676 thread->cpu_delta = 0;
677
678 #if CONFIG_SCHED_CLUTCH
679 /*
680 * Update the CPU usage for the thread group to which the thread belongs.
681 * The implementation assumes that the thread ran for the entire delta
682 * as part of the same thread group.
683 */
684 sched_clutch_cpu_usage_update(thread, delta);
685 #endif /* CONFIG_SCHED_CLUTCH */
686
687 const struct shift_data *shiftp = &sched_decay_shifts[ticks];
688
689 if (shiftp->shift2 > 0) {
690 thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
691 (thread->cpu_usage >> shiftp->shift2);
692 thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
693 (thread->sched_usage >> shiftp->shift2);
694 } else {
695 thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
696 (thread->cpu_usage >> -(shiftp->shift2));
697 thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
698 (thread->sched_usage >> -(shiftp->shift2));
699 }
700 } else {
701 thread->cpu_usage = thread->cpu_delta = 0;
702 thread->sched_usage = 0;
703 }
704
705 /*
706 * Check for fail-safe release.
707 */
708 if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
709 mach_absolute_time() >= thread->safe_release) {
710 sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
711 }
712
713 /*
714 * Now that the thread's CPU usage has been accumulated and aged
715 * based on contention of the previous tick window, update the
716 * pri_shift of the thread to match the current global load/shift
717 * values. The updated pri_shift is then used to calculate the
718 * new priority of the thread.
719 */
720 #if CONFIG_SCHED_CLUTCH
721 thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
722 #else /* CONFIG_SCHED_CLUTCH */
723 thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
724 #endif /* CONFIG_SCHED_CLUTCH */
725
726 /* Recompute scheduled priority if appropriate. */
727 if (thread->sched_mode == TH_MODE_TIMESHARE) {
728 thread_recompute_sched_pri(thread, SETPRI_LAZY);
729 }
730 }
731
732 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
733
734
735 /*
736 * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
737 * Each other bucket is a count of the runnable non-idle threads
738 * with that property. All updates to these counts should be
739 * performed with os_atomic_* operations.
740 *
741 * For the clutch scheduler, this global bucket is used only for
742 * keeping the total global run count.
743 */
744 uint32_t sched_run_buckets[TH_BUCKET_MAX];
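/*
 * For example, a reader that only needs the instantaneous global run count
 * would do (a sketch; the relaxed ordering matches the other accessors in
 * this file):
 *
 *	uint32_t nrun = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
 */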
745
746 static void
747 sched_incr_bucket(sched_bucket_t bucket)
748 {
749 assert(bucket >= TH_BUCKET_FIXPRI &&
750 bucket <= TH_BUCKET_SHARE_BG);
751
752 os_atomic_inc(&sched_run_buckets[bucket], relaxed);
753 }
754
755 static void
756 sched_decr_bucket(sched_bucket_t bucket)
757 {
758 assert(bucket >= TH_BUCKET_FIXPRI &&
759 bucket <= TH_BUCKET_SHARE_BG);
760
761 assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
762
763 os_atomic_dec(&sched_run_buckets[bucket], relaxed);
764 }
765
766 static void
767 sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
768 {
769 assert(bucket >= TH_BUCKET_FIXPRI &&
770 bucket <= TH_BUCKET_SHARE_BG);
771
772 os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
773 }
774
775 static void
776 sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
777 {
778 assert(bucket >= TH_BUCKET_FIXPRI &&
779 bucket <= TH_BUCKET_SHARE_BG);
780
781 assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);
782
783 os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
784 }
785
786 uint32_t
787 sched_run_incr(thread_t thread)
788 {
789 assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
790
791 uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
792
793 sched_incr_bucket(thread->th_sched_bucket);
794
795 return new_count;
796 }
797
798 uint32_t
799 sched_run_decr(thread_t thread)
800 {
801 assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
802
803 sched_decr_bucket(thread->th_sched_bucket);
804
805 uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
806
807 return new_count;
808 }
809
810 uint32_t
811 sched_smt_run_incr(thread_t thread)
812 {
813 assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);
814
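/*
 * A no-SMT thread effectively claims both hyperthreads of a core while SMT
 * timesharing is enabled, so count it with a run weight of 2.
 */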
815 uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
816 thread->sched_saved_run_weight = run_weight;
817
818 uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
819
820 sched_add_bucket(thread->th_sched_bucket, run_weight);
821
822 return new_count;
823 }
824
825 uint32_t
826 sched_smt_run_decr(thread_t thread)
827 {
828 assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);
829
830 uint8_t run_weight = thread->sched_saved_run_weight;
831
832 sched_sub_bucket(thread->th_sched_bucket, run_weight);
833
834 uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);
835
836 return new_count;
837 }
838
839 void
840 sched_update_thread_bucket(thread_t thread)
841 {
842 sched_bucket_t old_bucket = thread->th_sched_bucket;
843 sched_bucket_t new_bucket = TH_BUCKET_RUN;
844
845 switch (thread->sched_mode) {
846 case TH_MODE_FIXED:
847 case TH_MODE_REALTIME:
848 new_bucket = TH_BUCKET_FIXPRI;
849 break;
850
851 case TH_MODE_TIMESHARE:
852 if (thread->base_pri > BASEPRI_DEFAULT) {
853 new_bucket = TH_BUCKET_SHARE_FG;
854 } else if (thread->base_pri > BASEPRI_UTILITY) {
855 new_bucket = TH_BUCKET_SHARE_DF;
856 } else if (thread->base_pri > MAXPRI_THROTTLE) {
857 new_bucket = TH_BUCKET_SHARE_UT;
858 } else {
859 new_bucket = TH_BUCKET_SHARE_BG;
860 }
861 break;
862
863 default:
864 panic("unexpected mode: %d", thread->sched_mode);
865 break;
866 }
867
868 if (old_bucket != new_bucket) {
869 thread->th_sched_bucket = new_bucket;
870 thread->pri_shift = sched_pri_shifts[new_bucket];
871
872 if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
873 sched_decr_bucket(old_bucket);
874 sched_incr_bucket(new_bucket);
875 }
876 }
877 }
878
879 void
880 sched_smt_update_thread_bucket(thread_t thread)
881 {
882 sched_bucket_t old_bucket = thread->th_sched_bucket;
883 sched_bucket_t new_bucket = TH_BUCKET_RUN;
884
885 switch (thread->sched_mode) {
886 case TH_MODE_FIXED:
887 case TH_MODE_REALTIME:
888 new_bucket = TH_BUCKET_FIXPRI;
889 break;
890
891 case TH_MODE_TIMESHARE:
892 if (thread->base_pri > BASEPRI_DEFAULT) {
893 new_bucket = TH_BUCKET_SHARE_FG;
894 } else if (thread->base_pri > BASEPRI_UTILITY) {
895 new_bucket = TH_BUCKET_SHARE_DF;
896 } else if (thread->base_pri > MAXPRI_THROTTLE) {
897 new_bucket = TH_BUCKET_SHARE_UT;
898 } else {
899 new_bucket = TH_BUCKET_SHARE_BG;
900 }
901 break;
902
903 default:
904 panic("unexpected mode: %d", thread->sched_mode);
905 break;
906 }
907
908 if (old_bucket != new_bucket) {
909 thread->th_sched_bucket = new_bucket;
910 thread->pri_shift = sched_pri_shifts[new_bucket];
911
912 if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
913 sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
914 sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
915 }
916 }
917 }
918
919 /*
920 * Set the thread's true scheduling mode
921 * Called with thread mutex and thread locked
922 * The thread has already been removed from the runqueue.
923 *
924 * (saved_mode is handled before this point)
925 */
926 void
927 sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
928 {
929 assert(thread->runq == PROCESSOR_NULL);
930
931 switch (new_mode) {
932 case TH_MODE_FIXED:
933 case TH_MODE_REALTIME:
934 case TH_MODE_TIMESHARE:
935 break;
936
937 default:
938 panic("unexpected mode: %d", new_mode);
939 break;
940 }
941
942 #if CONFIG_SCHED_AUTO_JOIN
943 /*
944 * Realtime threads might have auto-joined a work interval based on
945 * make runnable relationships. If such an RT thread is now being demoted
946 * to non-RT, unjoin the thread from the work interval.
947 */
948 if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
949 assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
950 work_interval_auto_join_demote(thread);
951 }
952 #endif /* CONFIG_SCHED_AUTO_JOIN */
953
954 thread->sched_mode = new_mode;
955
956 SCHED(update_thread_bucket)(thread);
957 }
958
959 /*
960 * Demote the true scheduler mode to timeshare (called with the thread locked)
961 */
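/*
 * For example, the fail-safe in thread_quantum_expire() above demotes a
 * fixed/realtime thread with sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE),
 * and update_priority() undemotes it once thread->safe_release has passed.
 */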
962 void
963 sched_thread_mode_demote(thread_t thread, uint32_t reason)
964 {
965 assert(reason & TH_SFLAG_DEMOTED_MASK);
966 assert((thread->sched_flags & reason) != reason);
967
968 if (thread->policy_reset) {
969 return;
970 }
971
972 if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
973 /* Another demotion reason is already active */
974 thread->sched_flags |= reason;
975 return;
976 }
977
978 assert(thread->saved_mode == TH_MODE_NONE);
979
980 boolean_t removed = thread_run_queue_remove(thread);
981
982 thread->sched_flags |= reason;
983
984 thread->saved_mode = thread->sched_mode;
985
986 sched_set_thread_mode(thread, TH_MODE_TIMESHARE);
987
988 thread_recompute_priority(thread);
989
990 if (removed) {
991 thread_run_queue_reinsert(thread, SCHED_TAILQ);
992 }
993 }
994
995 /*
996 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
997 */
998 void
999 sched_thread_mode_undemote(thread_t thread, uint32_t reason)
1000 {
1001 assert(reason & TH_SFLAG_DEMOTED_MASK);
1002 assert((thread->sched_flags & reason) == reason);
1003 assert(thread->saved_mode != TH_MODE_NONE);
1004 assert(thread->sched_mode == TH_MODE_TIMESHARE);
1005 assert(thread->policy_reset == 0);
1006
1007 thread->sched_flags &= ~reason;
1008
1009 if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
1010 /* Another demotion reason is still active */
1011 return;
1012 }
1013
1014 boolean_t removed = thread_run_queue_remove(thread);
1015
1016 sched_set_thread_mode(thread, thread->saved_mode);
1017
1018 thread->saved_mode = TH_MODE_NONE;
1019
1020 thread_recompute_priority(thread);
1021
1022 if (removed) {
1023 thread_run_queue_reinsert(thread, SCHED_TAILQ);
1024 }
1025 }
1026
1027 /*
1028 * Promote thread to have a sched pri floor for a specific reason
1029 *
1030 * Promotion must not last past the syscall boundary.
1031 * Clients must always pair promote and demote 1:1;
1032 * handling nesting of the same promote reason is the client's responsibility.
1033 *
1034 * Called at splsched with thread locked
1035 */
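/*
 * Illustrative pairing (a sketch; the actual callers live elsewhere), at
 * splsched with the thread locked:
 *
 *	sched_thread_promote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj);
 *	... window during which the floor is needed ...
 *	sched_thread_unpromote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj);
 */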
1036 void
1037 sched_thread_promote_reason(thread_t thread,
1038 uint32_t reason,
1039 __kdebug_only uintptr_t trace_obj /* already unslid */)
1040 {
1041 assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
1042 assert((thread->sched_flags & reason) != reason);
1043
1044 switch (reason) {
1045 case TH_SFLAG_RW_PROMOTED:
1046 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
1047 thread_tid(thread), thread->sched_pri,
1048 thread->base_pri, trace_obj);
1049 break;
1050 case TH_SFLAG_WAITQ_PROMOTED:
1051 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
1052 thread_tid(thread), thread->sched_pri,
1053 thread->base_pri, trace_obj);
1054 break;
1055 case TH_SFLAG_EXEC_PROMOTED:
1056 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
1057 thread_tid(thread), thread->sched_pri,
1058 thread->base_pri, trace_obj);
1059 break;
1060 case TH_SFLAG_FLOOR_PROMOTED:
1061 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_PROMOTE),
1062 thread_tid(thread), thread->sched_pri,
1063 thread->base_pri, trace_obj);
1064 break;
1065 }
1066
1067 thread->sched_flags |= reason;
1068 thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
1069 }
1070
1071 /*
1072 * End a specific promotion reason
1073 * Demotes a thread back to its expected priority without the promotion in place
1074 *
1075 * Called at splsched with thread locked
1076 */
1077 void
1078 sched_thread_unpromote_reason(thread_t thread,
1079 uint32_t reason,
1080 __kdebug_only uintptr_t trace_obj /* already unslid */)
1081 {
1082 assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
1083 assert((thread->sched_flags & reason) == reason);
1084
1085 switch (reason) {
1086 case TH_SFLAG_RW_PROMOTED:
1087 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
1088 thread_tid(thread), thread->sched_pri,
1089 thread->base_pri, trace_obj);
1090 break;
1091 case TH_SFLAG_WAITQ_PROMOTED:
1092 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
1093 thread_tid(thread), thread->sched_pri,
1094 thread->base_pri, trace_obj);
1095 break;
1096 case TH_SFLAG_EXEC_PROMOTED:
1097 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
1098 thread_tid(thread), thread->sched_pri,
1099 thread->base_pri, trace_obj);
1100 break;
1101 case TH_SFLAG_FLOOR_PROMOTED:
1102 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_DEMOTE),
1103 thread_tid(thread), thread->sched_pri,
1104 thread->base_pri, trace_obj);
1105 break;
1106 }
1107
1108 thread->sched_flags &= ~reason;
1109
1110 thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
1111 }
1112