/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	priority.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Priority related scheduler bits.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/sched.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/ledger.h>
#include <machine/machparam.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/sched_clutch.h>

#ifdef CONFIG_MACH_APPROXIMATE_TIME
#include <machine/commpage.h> /* for commpage_update_mach_approximate_time */
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

/*
 * thread_quantum_expire:
 *
 * Recalculate the quantum and priority for a thread.
 *
 * Called at splsched.
 */

void
thread_quantum_expire(
	timer_call_param_t p0,
	timer_call_param_t p1)
{
	processor_t processor = p0;
	thread_t thread = p1;
	ast_t preempt;
	uint64_t ctime;

	assert(processor == current_processor());
	assert(thread == current_thread());

	KDBG_RELEASE(MACHDBG_CODE(
		DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START);

	SCHED_STATS_INC(quantum_timer_expirations);

	/*
	 * We bill CPU time to both the individual thread and its task.
	 *
	 * Because this balance adjustment could potentially attempt to wake this
	 * very thread, we must credit the ledger before taking the thread lock.
	 * The ledger pointers are only manipulated by the thread itself at the
	 * AST boundary.
	 *
	 * TODO: This fails to account for the time between when the timer was
	 * armed and when it fired.  It should be based on the system_timer and
	 * running a timer_update operation here.
	 */
	ledger_credit(thread->t_ledger, task_ledgers.cpu_time, thread->quantum_remaining);
	ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining);
	if (thread->t_bankledger) {
		ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time,
		    (thread->quantum_remaining - thread->t_deduct_bank_ledger_time));
	}
	thread->t_deduct_bank_ledger_time = 0;

	struct recount_snap snap = { 0 };
	recount_snapshot(&snap);
	ctime = snap.rsn_time_mach;
	check_monotonic_time(ctime);
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	commpage_update_mach_approximate_time(ctime);
#endif /* CONFIG_MACH_APPROXIMATE_TIME */

	sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket);

	recount_switch_thread(&snap, thread, get_threadtask(thread));
	recount_log_switch_thread(&snap);

	thread_lock(thread);

	/*
	 * We've run up until our quantum expiration, and will (potentially)
	 * continue without re-entering the scheduler, so update this now.
	 */
	processor->last_dispatch = ctime;
	thread->last_run_time = ctime;

	/*
	 * Check for fail-safe trip.
	 */
	if ((thread->sched_mode == TH_MODE_REALTIME || thread->sched_mode == TH_MODE_FIXED) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTED) &&
	    (thread->kern_promotion_schedpri == 0) &&
	    !(thread->sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) &&
	    !(thread->options & TH_OPT_SYSTEM_CRITICAL)) {
		uint64_t new_computation;

		new_computation = ctime - thread->computation_epoch;
		new_computation += thread->computation_metered;
		/*
		 * Remove any time spent handling interrupts outside of the thread's
		 * control.
		 */
		new_computation -= recount_current_thread_interrupt_time_mach() - thread->computation_interrupt_epoch;

		bool demote = false;
		switch (thread->sched_mode) {
		case TH_MODE_REALTIME:
			if (new_computation > max_unsafe_rt_computation) {
				thread->safe_release = ctime + sched_safe_rt_duration;
				demote = true;
			}
			break;
		case TH_MODE_FIXED:
			if (new_computation > max_unsafe_fixed_computation) {
				thread->safe_release = ctime + sched_safe_fixed_duration;
				demote = true;
			}
			break;
		default:
			panic("unexpected mode: %d", thread->sched_mode);
		}

		if (demote) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE,
			    (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0);
			sched_thread_mode_demote(thread, TH_SFLAG_FAILSAFE);
		}
	}

	/*
	 * Recompute scheduled priority if appropriate.
	 */
	if (SCHED(can_update_priority)(thread)) {
		SCHED(update_priority)(thread);
	} else {
		SCHED(lightweight_update_priority)(thread);
	}

	if (thread->sched_mode != TH_MODE_REALTIME) {
		SCHED(quantum_expire)(thread);
	}

	/*
	 * This quantum is up, give this thread another.
	 */
	processor->first_timeslice = FALSE;

	thread_quantum_init(thread, ctime);

	timer_update(&thread->runnable_timer, ctime);

	processor->quantum_end = ctime + thread->quantum_remaining;

	/*
	 * Context switch check
	 *
	 * non-urgent flags don't affect kernel threads, so upgrade to urgent
	 * to ensure that rebalancing and non-recommendation kick in quickly.
	 */

	ast_t check_reason = AST_QUANTUM;
	if (get_threadtask(thread) == kernel_task) {
		check_reason |= AST_URGENT;
	}

	if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) {
		ast_on(preempt);
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the AST.  To avoid
	 * waiting for the next context switch to propagate it, the AST is
	 * propagated here at quantum expiration.
	 */
	ast_propagate(thread);

	thread_unlock(thread);

	/*
	 * Now that the processor->thread_timer has been updated, evaluate to
	 * see if the workqueue quantum expired and set AST_KEVENT if it has.
	 */
	if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
		thread_evaluate_workqueue_quantum_expiry(thread);
	}

	running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread,
	    processor->quantum_end, ctime);

	/* Tell platform layer that we are still running this thread */
	thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL);
	machine_thread_going_on_core(thread, urgency, 0, 0, ctime);
	machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime,
	    0, thread);

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
	sched_timeshare_consider_maintenance(ctime, false);
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

#if __arm64__
	if (thread->sched_mode == TH_MODE_REALTIME) {
		sched_consider_recommended_cores(ctime, thread);
	}
#endif /* __arm64__ */

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0);
}

/*
 * sched_set_thread_base_priority:
 *
 * Set the base priority of the thread
 * and reset its scheduled priority.
 *
 * This is the only path to change base_pri.
 *
 * Called with the thread locked.
 */
void
sched_set_thread_base_priority(thread_t thread, int priority)
{
	assert(priority >= MINPRI);
	uint64_t ctime = 0;

	if (thread->sched_mode == TH_MODE_REALTIME) {
		assert((priority >= BASEPRI_RTQUEUES) && (priority <= MAXPRI));
	} else {
		assert(priority < BASEPRI_RTQUEUES);
	}

	int old_base_pri = thread->base_pri;
	thread->req_base_pri = (int16_t)priority;
	if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) {
		priority = MAX(priority, old_base_pri);
	}
	thread->base_pri = (int16_t)priority;

	if ((thread->state & TH_RUN) == TH_RUN) {
		assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE);
		ctime = mach_approximate_time();
		thread->last_basepri_change_time = ctime;
	} else {
		assert(thread->last_basepri_change_time == THREAD_NOT_RUNNABLE);
		assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE);
	}

	/*
	 * Currently the perfcontrol_attr depends on the base pri of the
	 * thread.  Therefore, we use this function as the hook for the
	 * perfcontrol callout.
	 */
	if (thread == current_thread() && old_base_pri != priority) {
		if (!ctime) {
			ctime = mach_approximate_time();
		}
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread);
	}
#if !CONFIG_SCHED_CLUTCH
	/* For the clutch scheduler, this operation is done in set_sched_pri() */
	SCHED(update_thread_bucket)(thread);
#endif /* !CONFIG_SCHED_CLUTCH */

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}

/*
 * sched_set_kernel_thread_priority:
 *
 * Set the absolute base priority of the thread
 * and reset its scheduled priority.
 *
 * Called with the thread unlocked.
 */
void
sched_set_kernel_thread_priority(thread_t thread, int new_priority)
{
	spl_t s = splsched();

	thread_lock(thread);

	assert(thread->sched_mode != TH_MODE_REALTIME);
	assert(thread->effective_policy.thep_qos == THREAD_QOS_UNSPECIFIED);

	if (new_priority > thread->max_priority) {
		new_priority = thread->max_priority;
	}
#if !defined(XNU_TARGET_OS_OSX)
	if (new_priority < MAXPRI_THROTTLE) {
		new_priority = MAXPRI_THROTTLE;
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	thread->importance = new_priority - thread->task_priority;

	sched_set_thread_base_priority(thread, new_priority);

	thread_unlock(thread);
	splx(s);
}
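
/*
 * A typical (hypothetical) call from kernel-thread setup code; the real
 * call sites live elsewhere in xnu, and this is only a usage sketch:
 *
 *	thread_t thread;
 *	kernel_thread_start(continuation, NULL, &thread);
 *	sched_set_kernel_thread_priority(thread, BASEPRI_KERNEL);
 *	thread_deallocate(thread);
 */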

/*
 * thread_recompute_sched_pri:
 *
 * Reset the scheduled priority of the thread
 * according to its base priority if the
 * thread has not been promoted or depressed.
 *
 * This is the only way to push base_pri changes into sched_pri,
 * or to recalculate the appropriate sched_pri after changing
 * a promotion or depression.
 *
 * Called at splsched with the thread locked.
 *
 * TODO: Add an 'update urgency' flag to avoid urgency callouts on every rwlock operation
 */
void
thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
	uint32_t sched_flags = thread->sched_flags;
	sched_mode_t sched_mode = thread->sched_mode;

	int16_t priority = thread->base_pri;

	if (sched_mode == TH_MODE_TIMESHARE) {
		priority = (int16_t)SCHED(compute_timeshare_priority)(thread);
	}

	if (sched_flags & TH_SFLAG_DEPRESS) {
		/* thread_yield_internal overrides kernel mutex promotion */
		priority = DEPRESSPRI;
	} else {
		/* poll-depress is overridden by mutex promotion and promote-reasons */
		if ((sched_flags & TH_SFLAG_POLLDEPRESS)) {
			priority = DEPRESSPRI;
		}

		if (thread->kern_promotion_schedpri > 0) {
			priority = MAX(priority, thread->kern_promotion_schedpri);

			if (sched_mode != TH_MODE_REALTIME) {
				priority = MIN(priority, MAXPRI_PROMOTE);
			}
		}

		if (sched_flags & TH_SFLAG_PROMOTED) {
			priority = MAX(priority, thread->promotion_priority);

			if (sched_mode != TH_MODE_REALTIME) {
				priority = MIN(priority, MAXPRI_PROMOTE);
			}
		}

		if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) {
			if (sched_flags & TH_SFLAG_RW_PROMOTED) {
				priority = MAX(priority, MINPRI_RWLOCK);
			}

			if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
				priority = MAX(priority, MINPRI_WAITQ);
			}

			if (sched_flags & TH_SFLAG_EXEC_PROMOTED) {
				priority = MAX(priority, MINPRI_EXEC);
			}

			if (sched_flags & TH_SFLAG_FLOOR_PROMOTED) {
				priority = MAX(priority, MINPRI_FLOOR);
			}
		}
	}

	set_sched_pri(thread, priority, options);
}

void
sched_default_quantum_expire(thread_t thread __unused)
{
	/*
	 * No special behavior when a timeshare, fixed, or realtime thread
	 * uses up its entire quantum.
	 */
}

/*
 * SMT timesharing tunables: when smt_timeshare_enabled is set, a thread
 * that disallows SMT has its timesharing usage inflated by
 * smt_sched_bonus_16ths/16 of the measured delta (and is counted with a
 * run weight of 2 by sched_smt_run_incr() below).
 */
int smt_timeshare_enabled = 1;
int smt_sched_bonus_16ths = 8;
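
/*
 * Worked example: with the default bonus of 8/16ths, a no-SMT thread that
 * accrues a delta of 16 sched_tick units is charged
 * 16 + ((16 * 8) >> 4) = 24 units of sched_usage (see
 * lightweight_update_priority() and update_priority() below).
 */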

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

/*
 * lightweight_update_priority:
 *
 * Update the scheduled priority for
 * a timesharing thread.
 *
 * Only for use on the current thread.
 *
 * Called with the thread locked.
 */
void
lightweight_update_priority(thread_t thread)
{
	assert(thread->runq == PROCESSOR_NULL);
	assert(thread == current_thread());

	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		int priority;
		uint32_t delta;

		sched_tick_delta(thread, delta);

		/*
		 * Accumulate timesharing usage only
		 * during contention for processor
		 * resources.
		 */
		if (thread->pri_shift < INT8_MAX) {
			if (thread_no_smt(thread) && smt_timeshare_enabled) {
				thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
			} else {
				thread->sched_usage += delta;
			}
		}

		thread->cpu_delta += delta;

#if CONFIG_SCHED_CLUTCH
		/*
		 * Update the CPU usage for the thread group to which the thread belongs.
		 * The implementation assumes that the thread ran for the entire delta
		 * as part of the same thread group.
		 */
		sched_clutch_cpu_usage_update(thread, delta);
#endif /* CONFIG_SCHED_CLUTCH */

		priority = sched_compute_timeshare_priority(thread);

		if (priority != thread->sched_pri) {
			thread_recompute_sched_pri(thread, SETPRI_LAZY);
		}
	}
}

/*
 * Define shifts for simulating (5/8) ** n.
 *
 * Shift structures for holding update shifts.  The actual computation
 * is usage = (usage >> shift1) +/- (usage >> abs(shift2)), where the
 * +/- is determined by the sign of shift2.
 */

const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{ .shift1 = 1, .shift2 = 1 },
	{ .shift1 = 1, .shift2 = 3 },
	{ .shift1 = 1, .shift2 = -3 },
	{ .shift1 = 2, .shift2 = -7 },
	{ .shift1 = 3, .shift2 = 5 },
	{ .shift1 = 3, .shift2 = -5 },
	{ .shift1 = 4, .shift2 = -8 },
	{ .shift1 = 5, .shift2 = 7 },
	{ .shift1 = 5, .shift2 = -7 },
	{ .shift1 = 6, .shift2 = -10 },
	{ .shift1 = 7, .shift2 = 10 },
	{ .shift1 = 7, .shift2 = -9 },
	{ .shift1 = 8, .shift2 = -11 },
	{ .shift1 = 9, .shift2 = 12 },
	{ .shift1 = 9, .shift2 = -11 },
	{ .shift1 = 10, .shift2 = -13 },
	{ .shift1 = 11, .shift2 = 14 },
	{ .shift1 = 11, .shift2 = -13 },
	{ .shift1 = 12, .shift2 = -15 },
	{ .shift1 = 13, .shift2 = 17 },
	{ .shift1 = 13, .shift2 = -15 },
	{ .shift1 = 14, .shift2 = -17 },
	{ .shift1 = 15, .shift2 = 19 },
	{ .shift1 = 16, .shift2 = 18 },
	{ .shift1 = 16, .shift2 = -19 },
	{ .shift1 = 17, .shift2 = 22 },
	{ .shift1 = 18, .shift2 = 20 },
	{ .shift1 = 18, .shift2 = -20 },
	{ .shift1 = 19, .shift2 = 26 },
	{ .shift1 = 20, .shift2 = 22 },
	{ .shift1 = 20, .shift2 = -22 },
	{ .shift1 = 21, .shift2 = -27 }
};
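
/*
 * Sanity-check sketch for the table above (not part of the build; it
 * assumes a hosted environment with <math.h>/<stdio.h> and visibility of
 * the shift_data definitions).  Entry n approximates (5/8) ** n; e.g. for
 * n = 1, shift1 = 1 and shift2 = 3 give
 * (u >> 1) + (u >> 3) = 0.5u + 0.125u = 0.625u = (5/8)u exactly.
 */
#if 0
#include <math.h>
#include <stdio.h>

static void
verify_decay_shifts(void)
{
	for (int n = 0; n < SCHED_DECAY_TICKS; n++) {
		const struct shift_data *s = &sched_decay_shifts[n];
		/* usage = (usage >> shift1) +/- (usage >> abs(shift2)), for usage = 1.0 */
		double approx = (s->shift2 > 0)
		    ? ldexp(1.0, -s->shift1) + ldexp(1.0, -s->shift2)
		    : ldexp(1.0, -s->shift1) - ldexp(1.0, s->shift2);
		printf("n=%2d exact=%.6f approx=%.6f\n", n, pow(5.0 / 8.0, n), approx);
	}
}
#endif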

/*
 * sched_compute_timeshare_priority:
 *
 * Calculate the timesharing priority based upon usage and load.
 */
extern int sched_pri_decay_band_limit;

/* Only use the decay floor logic on non-macOS and non-clutch schedulers */
#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH

int
sched_compute_timeshare_priority(thread_t thread)
{
	int decay_amount;
	int decay_limit = sched_pri_decay_band_limit;

	if (thread->base_pri > BASEPRI_FOREGROUND) {
		decay_limit += (thread->base_pri - BASEPRI_FOREGROUND);
	}

	if (thread->pri_shift == INT8_MAX) {
		decay_amount = 0;
	} else {
		decay_amount = (thread->sched_usage >> thread->pri_shift);
	}

	if (decay_amount > decay_limit) {
		decay_amount = decay_limit;
	}

	/* start with base priority */
	int priority = thread->base_pri - decay_amount;

	if (priority < MAXPRI_THROTTLE) {
		if (get_threadtask(thread)->max_priority > MAXPRI_THROTTLE) {
			priority = MAXPRI_THROTTLE;
		} else if (priority < MINPRI_USER) {
			priority = MINPRI_USER;
		}
	} else if (priority > MAXPRI_KERNEL) {
		priority = MAXPRI_KERNEL;
	}

	return priority;
}

#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */

int
sched_compute_timeshare_priority(thread_t thread)
{
	/* start with base priority */
	int priority = thread->base_pri;

	if (thread->pri_shift != INT8_MAX) {
		priority -= (thread->sched_usage >> thread->pri_shift);
	}

	if (priority < MINPRI_USER) {
		priority = MINPRI_USER;
	} else if (priority > MAXPRI_KERNEL) {
		priority = MAXPRI_KERNEL;
	}

	return priority;
}

#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */
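
/*
 * Worked example for the default path above: a timeshare thread at
 * base_pri 31 (BASEPRI_DEFAULT) with sched_usage = 0x8000 and pri_shift 13
 * is scheduled at 31 - (0x8000 >> 13) = 31 - 4 = 27; under heavier load
 * (pri_shift 11) the same usage yields 31 - 16 = 15.  A pri_shift of
 * INT8_MAX means "no contention" and leaves sched_pri at base_pri.
 */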

/*
 * can_update_priority
 *
 * Make sure we don't do re-dispatches more frequently than a scheduler tick.
 *
 * Called with the thread locked.
 */
boolean_t
can_update_priority(
	thread_t thread)
{
	if (sched_tick == thread->sched_stamp) {
		return FALSE;
	} else {
		return TRUE;
	}
}

/*
 * update_priority
 *
 * Perform housekeeping operations driven by scheduler tick.
 *
 * Called with the thread locked.
 */
void
update_priority(
	thread_t thread)
{
	uint32_t ticks, delta;

	ticks = sched_tick - thread->sched_stamp;
	assert(ticks != 0);

	thread->sched_stamp += ticks;

	/* If requested, accelerate aging of sched_usage */
	if (sched_decay_usage_age_factor > 1) {
		ticks *= sched_decay_usage_age_factor;
	}

	/*
	 * Gather cpu usage data.
	 */
	sched_tick_delta(thread, delta);
	if (ticks < SCHED_DECAY_TICKS) {
		/*
		 * Accumulate timesharing usage only during contention for processor
		 * resources.  Use the pri_shift from the previous tick window to
		 * determine if the system was in a contended state.
		 */
		if (thread->pri_shift < INT8_MAX) {
			if (thread_no_smt(thread) && smt_timeshare_enabled) {
				thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4));
			} else {
				thread->sched_usage += delta;
			}
		}

		thread->cpu_usage += delta + thread->cpu_delta;
		thread->cpu_delta = 0;

#if CONFIG_SCHED_CLUTCH
		/*
		 * Update the CPU usage for the thread group to which the thread belongs.
		 * The implementation assumes that the thread ran for the entire delta
		 * as part of the same thread group.
		 */
		sched_clutch_cpu_usage_update(thread, delta);
#endif /* CONFIG_SCHED_CLUTCH */

		const struct shift_data *shiftp = &sched_decay_shifts[ticks];

		if (shiftp->shift2 > 0) {
			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) +
			    (thread->cpu_usage >> shiftp->shift2);
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) +
			    (thread->sched_usage >> shiftp->shift2);
		} else {
			thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) -
			    (thread->cpu_usage >> -(shiftp->shift2));
			thread->sched_usage = (thread->sched_usage >> shiftp->shift1) -
			    (thread->sched_usage >> -(shiftp->shift2));
		}
	} else {
		thread->cpu_usage = thread->cpu_delta = 0;
		thread->sched_usage = 0;
	}

	/*
	 * Check for fail-safe release.
	 */
	if ((thread->sched_flags & TH_SFLAG_FAILSAFE) &&
	    mach_absolute_time() >= thread->safe_release) {
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);
	}

	/*
	 * Now that the thread's CPU usage has been accumulated and aged
	 * based on contention of the previous tick window, update the
	 * pri_shift of the thread to match the current global load/shift
	 * values.  The updated pri_shift would be used to calculate the
	 * new priority of the thread.
	 */
#if CONFIG_SCHED_CLUTCH
	thread->pri_shift = sched_clutch_thread_pri_shift(thread, thread->th_sched_bucket);
#else /* CONFIG_SCHED_CLUTCH */
	thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket];
#endif /* CONFIG_SCHED_CLUTCH */

	/* Recompute scheduled priority if appropriate. */
	if (thread->sched_mode == TH_MODE_TIMESHARE) {
		thread_recompute_sched_pri(thread, SETPRI_LAZY);
	}
}

#endif /* CONFIG_SCHED_TIMESHARE_CORE */


/*
 * TH_BUCKET_RUN is a count of *all* runnable non-idle threads.
 * Each other bucket is a count of the runnable non-idle threads
 * with that property.  All updates to these counts should be
 * performed with os_atomic_* operations.
 *
 * For the clutch scheduler, this global bucket is used only for
 * keeping the total global run count.
 */
uint32_t sched_run_buckets[TH_BUCKET_MAX];
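
/*
 * A reader sampling these counts follows the same discipline (illustrative
 * sketch; assumes relaxed ordering is sufficient for the consumer):
 *
 *	uint32_t nrun = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
 */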

static void
sched_incr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	os_atomic_inc(&sched_run_buckets[bucket], relaxed);
}

static void
sched_decr_bucket(sched_bucket_t bucket)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);

	os_atomic_dec(&sched_run_buckets[bucket], relaxed);
}

static void
sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed);
}

static void
sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight)
{
	assert(bucket >= TH_BUCKET_FIXPRI &&
	    bucket <= TH_BUCKET_SHARE_BG);

	assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0);

	os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed);
}

uint32_t
sched_run_incr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);

	uint32_t new_count = os_atomic_inc(&sched_run_buckets[TH_BUCKET_RUN], relaxed);

	sched_incr_bucket(thread->th_sched_bucket);

	return new_count;
}

uint32_t
sched_run_decr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);

	sched_decr_bucket(thread->th_sched_bucket);

	uint32_t new_count = os_atomic_dec(&sched_run_buckets[TH_BUCKET_RUN], relaxed);

	return new_count;
}

uint32_t
sched_smt_run_incr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN);

	uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 2 : 1;
	thread->sched_saved_run_weight = run_weight;

	uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);

	sched_add_bucket(thread->th_sched_bucket, run_weight);

	return new_count;
}

uint32_t
sched_smt_run_decr(thread_t thread)
{
	assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN);

	uint8_t run_weight = thread->sched_saved_run_weight;

	sched_sub_bucket(thread->th_sched_bucket, run_weight);

	uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed);

	return new_count;
}

void
sched_update_thread_bucket(thread_t thread)
{
	sched_bucket_t old_bucket = thread->th_sched_bucket;
	sched_bucket_t new_bucket = TH_BUCKET_RUN;

	switch (thread->sched_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		new_bucket = TH_BUCKET_FIXPRI;
		break;

	case TH_MODE_TIMESHARE:
		if (thread->base_pri > BASEPRI_DEFAULT) {
			new_bucket = TH_BUCKET_SHARE_FG;
		} else if (thread->base_pri > BASEPRI_UTILITY) {
			new_bucket = TH_BUCKET_SHARE_DF;
		} else if (thread->base_pri > MAXPRI_THROTTLE) {
			new_bucket = TH_BUCKET_SHARE_UT;
		} else {
			new_bucket = TH_BUCKET_SHARE_BG;
		}
		break;

	default:
		panic("unexpected mode: %d", thread->sched_mode);
		break;
	}

	if (old_bucket != new_bucket) {
		thread->th_sched_bucket = new_bucket;
		thread->pri_shift = sched_pri_shifts[new_bucket];

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			sched_decr_bucket(old_bucket);
			sched_incr_bucket(new_bucket);
		}
	}
}
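
/*
 * For example, with the priority bands from osfmk/kern/sched.h: a
 * timeshare thread at base_pri 37 maps to TH_BUCKET_SHARE_FG, at 31
 * (BASEPRI_DEFAULT) to TH_BUCKET_SHARE_DF, at 20 (BASEPRI_UTILITY) to
 * TH_BUCKET_SHARE_UT, and at 4 (MAXPRI_THROTTLE) to TH_BUCKET_SHARE_BG.
 */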

void
sched_smt_update_thread_bucket(thread_t thread)
{
	sched_bucket_t old_bucket = thread->th_sched_bucket;
	sched_bucket_t new_bucket = TH_BUCKET_RUN;

	switch (thread->sched_mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
		new_bucket = TH_BUCKET_FIXPRI;
		break;

	case TH_MODE_TIMESHARE:
		if (thread->base_pri > BASEPRI_DEFAULT) {
			new_bucket = TH_BUCKET_SHARE_FG;
		} else if (thread->base_pri > BASEPRI_UTILITY) {
			new_bucket = TH_BUCKET_SHARE_DF;
		} else if (thread->base_pri > MAXPRI_THROTTLE) {
			new_bucket = TH_BUCKET_SHARE_UT;
		} else {
			new_bucket = TH_BUCKET_SHARE_BG;
		}
		break;

	default:
		panic("unexpected mode: %d", thread->sched_mode);
		break;
	}

	if (old_bucket != new_bucket) {
		thread->th_sched_bucket = new_bucket;
		thread->pri_shift = sched_pri_shifts[new_bucket];

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			sched_sub_bucket(old_bucket, thread->sched_saved_run_weight);
			sched_add_bucket(new_bucket, thread->sched_saved_run_weight);
		}
	}
}

static inline void
sched_validate_mode(sched_mode_t mode)
{
	switch (mode) {
	case TH_MODE_FIXED:
	case TH_MODE_REALTIME:
	case TH_MODE_TIMESHARE:
		break;

	default:
		panic("unexpected mode: %d", mode);
		break;
	}
}

/*
 * Set the thread's true scheduling mode.
 * Called with the thread mutex and thread locked.
 * The thread has already been removed from the runqueue.
 *
 * (saved_mode is handled before this point)
 */
void
sched_set_thread_mode(thread_t thread, sched_mode_t new_mode)
{
	assert(thread->runq == PROCESSOR_NULL);

	sched_validate_mode(new_mode);

#if CONFIG_SCHED_AUTO_JOIN
	/*
	 * Realtime threads might have auto-joined a work interval based on
	 * make-runnable relationships.  If such an RT thread is now being
	 * demoted to non-RT, unjoin the thread from the work interval.
	 */
	if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) {
		assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK));
		work_interval_auto_join_demote(thread);
	}
#endif /* CONFIG_SCHED_AUTO_JOIN */

	thread->sched_mode = new_mode;

	SCHED(update_thread_bucket)(thread);
}

/*
 * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
 * That way there's zero confusion over which the user wants
 * and which the kernel wants.
 */
void
sched_set_thread_mode_user(thread_t thread, sched_mode_t new_mode)
{
	assert(thread->runq == PROCESSOR_NULL);

	sched_validate_mode(new_mode);

	/* If demoted, only modify the saved mode. */
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		thread->saved_mode = new_mode;
	} else {
		sched_set_thread_mode(thread, new_mode);
	}
}

sched_mode_t
sched_get_thread_mode_user(thread_t thread)
{
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		return thread->saved_mode;
	} else {
		return thread->sched_mode;
	}
}

/*
 * Demote the true scheduler mode to timeshare (called with the thread locked)
 */
void
sched_thread_mode_demote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) != reason);

	if (thread->policy_reset) {
		return;
	}

	switch (reason) {
	case TH_SFLAG_THROTTLED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_THROTTLED),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_FAILSAFE:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_FAILSAFE),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_RT_DISALLOWED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_DEMOTE_RT_DISALLOWED),
		    thread_tid(thread), thread->sched_flags);
		break;
	}

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is already active */
		thread->sched_flags |= reason;
		return;
	}

	assert(thread->saved_mode == TH_MODE_NONE);

	boolean_t removed = thread_run_queue_remove(thread);

	thread->sched_flags |= reason;

	thread->saved_mode = thread->sched_mode;

	sched_set_thread_mode(thread, TH_MODE_TIMESHARE);

	thread_recompute_priority(thread);

	if (removed) {
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
	}
}

/*
 * Return true if the thread is demoted for the specified reason.
 */
bool
sched_thread_mode_has_demotion(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	return (thread->sched_flags & reason) != 0;
}

/*
 * Un-demote the true scheduler mode back to the saved mode (called with the thread locked)
 */
void
sched_thread_mode_undemote(thread_t thread, uint32_t reason)
{
	assert(reason & TH_SFLAG_DEMOTED_MASK);
	assert((thread->sched_flags & reason) == reason);
	assert(thread->saved_mode != TH_MODE_NONE);
	assert(thread->sched_mode == TH_MODE_TIMESHARE);
	assert(thread->policy_reset == 0);

	switch (reason) {
	case TH_SFLAG_THROTTLED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_THROTTLED),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_FAILSAFE:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_FAILSAFE),
		    thread_tid(thread), thread->sched_flags);
		break;
	case TH_SFLAG_RT_DISALLOWED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MODE_UNDEMOTE_RT_DISALLOWED),
		    thread_tid(thread), thread->sched_flags);
		break;
	}

	thread->sched_flags &= ~reason;

	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) {
		/* Another demotion reason is still active */
		return;
	}

	boolean_t removed = thread_run_queue_remove(thread);

	sched_set_thread_mode(thread, thread->saved_mode);

	thread->saved_mode = TH_MODE_NONE;

	thread_recompute_priority(thread);

	if (removed) {
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
	}
}

/*
 * Promote a thread to have a sched-pri floor for a specific reason.
 *
 * The promotion must not last past the syscall boundary.
 * Clients must always pair promote and demote 1:1;
 * handling nesting of the same promote reason is the client's responsibility.
 *
 * Called at splsched with the thread locked.
 */
void
sched_thread_promote_reason(thread_t thread,
    uint32_t reason,
    __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) != reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_FLOOR_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_PROMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags |= reason;
	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}
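
/*
 * A typical (hypothetical) client pairing, with both calls made at
 * splsched with the thread locked:
 *
 *	sched_thread_promote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj);
 *	...	// thread now runs with a floor of at least MINPRI_RWLOCK
 *	sched_thread_unpromote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj);
 */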

/*
 * End a specific promotion reason.
 * Demotes the thread back to its expected priority without the promotion in place.
 *
 * Called at splsched with the thread locked.
 */
void
sched_thread_unpromote_reason(thread_t thread,
    uint32_t reason,
    __kdebug_only uintptr_t trace_obj /* already unslid */)
{
	assert(reason & TH_SFLAG_PROMOTE_REASON_MASK);
	assert((thread->sched_flags & reason) == reason);

	switch (reason) {
	case TH_SFLAG_RW_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_WAITQ_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_EXEC_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	case TH_SFLAG_FLOOR_PROMOTED:
		KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FLOOR_DEMOTE),
		    thread_tid(thread), thread->sched_pri,
		    thread->base_pri, trace_obj);
		break;
	}

	thread->sched_flags &= ~reason;

	thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
}