/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/startup.h>
#include <kern/host.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/simple_lock.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/timer_call.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <machine/atomic.h>

#include <pexpert/pexpert.h>

#include <libkern/kernel_mach_header.h>

#include <sys/kdebug.h>

#if CONFIG_SCHED_SFI

#define SFI_DEBUG 0

#if SFI_DEBUG
#define dprintf(...) kprintf(__VA_ARGS__)
#else
#define dprintf(...) do { } while(0)
#endif

/*
 * SFI (Selective Forced Idle) operates by enabling a global
 * timer on the SFI window interval. When it fires, all processors
 * running a thread that should be SFI-ed are sent an AST.
 * As threads become runnable while in their "off phase", they
 * are placed on a deferred ready queue. When a per-class
 * "on timer" fires, the ready threads for that class are
 * re-enqueued for running. As an optimization to avoid spurious
 * wakeups, the timer may be lazily programmed.
 */
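/*
 * Rough illustration (the numbers are hypothetical, not defaults): with a
 * 100ms SFI window and a 20ms off-time configured for a class, the global
 * "off timer" fires every 100ms; the class then enters its off phase and its
 * per-class "on timer" is programmed 20ms out, so threads of that class are
 * held off-core for roughly 20% of each window while the configuration is in
 * effect.
 */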

/*
 * The "sfi_lock" simple lock guards access to static configuration
 * parameters (as specified by userspace), dynamic state changes
 * (as updated by the timer event routine), and timer data structures.
 * Since it can be taken with interrupts disabled in some cases, it
 * must always be acquired with interrupts disabled at splsched(). The
 * "sfi_lock" also guards the "sfi_wait_class" field of thread_t, which
 * must only be accessed with the lock held.
 *
 * When an "on timer" fires, we must deterministically be able to drain
 * the wait queue, since if any threads are added to the queue afterwards,
 * they may never get woken out of SFI wait. So sfi_lock must be
 * taken before the wait queue's own spinlock.
 *
 * The wait queue will take the thread's scheduling lock. We may also take
 * the thread_lock directly to update the "sfi_class" field and determine
 * if the thread should block in the wait queue, but the lock will be
 * released before doing so.
 *
 * The pset lock may also be taken, but not while any other locks are held.
 *
 * The task and thread mutex may also be held while reevaluating sfi state.
 *
 * splsched ---> sfi_lock ---> waitq ---> thread_lock
 *        \  \              \__ thread_lock (*)
 *         \  \__ pset_lock
 *          \
 *           \__ thread_lock
 */

decl_simple_lock_data(static, sfi_lock);
static timer_call_data_t sfi_timer_call_entry;
volatile boolean_t sfi_is_enabled;

boolean_t sfi_window_is_set;
uint64_t sfi_window_usecs;
uint64_t sfi_window_interval;
uint64_t sfi_next_off_deadline;

typedef struct {
	sfi_class_id_t class_id;
	thread_continue_t class_continuation;
	const char *class_name;
	const char *class_ledger_name;
} sfi_class_registration_t;

/*
 * To add a new SFI class:
 *
 * 1) Raise MAX_SFI_CLASS_ID in mach/sfi_class.h
 * 2) Add a #define for it to mach/sfi_class.h. It need not be inserted in order of restrictiveness.
 * 3) Add a call to SFI_CLASS_REGISTER below
 * 4) Augment sfi_thread_classify so that threads are placed in the new class as early,
 *    and as restrictively, as possible
 * 5) Modify thermald to use the SFI class
 */

static inline void _sfi_wait_cleanup(void);

static void sfi_class_register(sfi_class_registration_t *);

#define SFI_CLASS_REGISTER(clsid, ledger_name) \
\
static void __attribute__((noinline, noreturn)) \
SFI_ ## clsid ## _THREAD_IS_WAITING(void *arg __unused, wait_result_t wret __unused) \
{ \
	_sfi_wait_cleanup(); \
	thread_exception_return(); \
} \
\
static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID"); \
\
static __startup_data sfi_class_registration_t \
SFI_ ## clsid ## _registration = { \
	.class_id = SFI_CLASS_ ## clsid, \
	.class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING, \
	.class_name = "SFI_CLASS_" # clsid, \
	.class_ledger_name = "SFI_CLASS_" # ledger_name, \
}; \
STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE, \
    sfi_class_register, &SFI_ ## clsid ## _registration)
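
/*
 * For illustration: SFI_CLASS_REGISTER(UTILITY, UTILITY) roughly expands to a
 * noreturn continuation SFI_UTILITY_THREAD_IS_WAITING() (which runs
 * _sfi_wait_cleanup() and returns to user space), plus a __startup_data
 * registration record for SFI_CLASS_UTILITY that STARTUP_ARG hands to
 * sfi_class_register() at the TUNABLES/STARTUP_RANK_MIDDLE phase of boot.
 */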

/* SFI_CLASS_UNSPECIFIED not included here */
SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE);
SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG);
SFI_CLASS_REGISTER(APP_NAP, APP_NAP);
SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED);
SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED);
SFI_CLASS_REGISTER(UTILITY, UTILITY);
SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT);
SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT);
SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY);
SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY);
SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED);
SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED);
SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE);
SFI_CLASS_REGISTER(USER_INTERACTIVE_NONFOCAL, USER_INTERACTIVE);
SFI_CLASS_REGISTER(KERNEL, OPTED_OUT);
SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT);

struct sfi_class_state {
	uint64_t off_time_usecs;
	uint64_t off_time_interval;

	timer_call_data_t on_timer;
	uint64_t on_timer_deadline;
	boolean_t on_timer_programmed;

	boolean_t class_sfi_is_enabled;
	volatile boolean_t class_in_on_phase;

	struct waitq waitq;     /* threads in ready state */
	thread_continue_t continuation;

	const char *class_name;
	const char *class_ledger_name;
};

/* Static configuration performed in sfi_early_init() */
struct sfi_class_state sfi_classes[MAX_SFI_CLASS_ID];

int sfi_enabled_class_count; // protected by sfi_lock and used atomically
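/*
 * Note: writers update sfi_enabled_class_count under the sfi_lock, but the
 * sfi_thread_classify() fast path below reads it with an atomic load and no
 * lock, so a momentarily stale count is tolerated there.
 */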

static void sfi_timer_global_off(
	timer_call_param_t param0,
	timer_call_param_t param1);

static void sfi_timer_per_class_on(
	timer_call_param_t param0,
	timer_call_param_t param1);

/* Called early in boot, when kernel is single-threaded */
__startup_func
static void
sfi_class_register(sfi_class_registration_t *reg)
{
	sfi_class_id_t class_id = reg->class_id;

	if (class_id >= MAX_SFI_CLASS_ID) {
		panic("Invalid SFI class 0x%x", class_id);
	}
	if (sfi_classes[class_id].continuation != NULL) {
		panic("Duplicate SFI registration for class 0x%x", class_id);
	}
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;
	sfi_classes[class_id].class_in_on_phase = TRUE;
	sfi_classes[class_id].continuation = reg->class_continuation;
	sfi_classes[class_id].class_name = reg->class_name;
	sfi_classes[class_id].class_ledger_name = reg->class_ledger_name;
}

void
sfi_init(void)
{
	sfi_class_id_t i;

	simple_lock_init(&sfi_lock, 0);
	timer_call_setup(&sfi_timer_call_entry, sfi_timer_global_off, NULL);
	sfi_window_is_set = FALSE;
	os_atomic_init(&sfi_enabled_class_count, 0);
	sfi_is_enabled = FALSE;

	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		/* If the class was set up in sfi_early_init(), initialize remaining fields */
		if (sfi_classes[i].continuation) {
			timer_call_setup(&sfi_classes[i].on_timer, sfi_timer_per_class_on, (void *)(uintptr_t)i);
			sfi_classes[i].on_timer_programmed = FALSE;

			waitq_init(&sfi_classes[i].waitq, WQT_QUEUE, SYNC_POLICY_FIFO);
		} else {
			/* The only allowed gap is for SFI_CLASS_UNSPECIFIED */
			if (i != SFI_CLASS_UNSPECIFIED) {
				panic("Gap in registered SFI classes");
			}
		}
	}
}

/* Can be called before sfi_init() by task initialization, but after sfi_early_init() */
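/*
 * Ledger aliasing, for example: SFI_CLASS_MANAGED_FOCAL and
 * SFI_CLASS_MANAGED_NONFOCAL both register the "SFI_CLASS_MANAGED" ledger
 * name, so whichever of the two has the lower class ID acts as the primary
 * (and returns SFI_CLASS_UNSPECIFIED here), while the other aliases to it.
 */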
sfi_class_id_t
sfi_get_ledger_alias_for_class(sfi_class_id_t class_id)
{
	sfi_class_id_t i;
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	/* Find the first class in the registration table with this ledger name */
	if (ledger_name) {
		for (i = SFI_CLASS_UNSPECIFIED + 1; i < class_id; i++) {
			if (0 == strcmp(sfi_classes[i].class_ledger_name, ledger_name)) {
				dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, i);
				return i;
			}
		}

		/* This class is the primary one for the ledger, so there is no alias */
		dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, SFI_CLASS_UNSPECIFIED);
		return SFI_CLASS_UNSPECIFIED;
	}

	/* We are permissive on SFI class lookup failures. In sfi_init(), we assert more */
	return SFI_CLASS_UNSPECIFIED;
}

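/*
 * Add a per-SFI-class wait time entry to a task ledger template. Wait times
 * are accounted in Mach absolute time units ("MATUs"), as credited by
 * _sfi_wait_cleanup() below.
 */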
int
sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id)
{
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	dprintf("sfi_ledger_entry_add(%p, 0x%x) -> %s\n", template, class_id, ledger_name);
	return ledger_entry_add(template, ledger_name, "sfi", "MATUs");
}

static void
sfi_timer_global_off(
	timer_call_param_t param0 __unused,
	timer_call_param_t param1 __unused)
{
	uint64_t now = mach_absolute_time();
	sfi_class_id_t i;
	processor_set_t pset, nset;
	processor_t processor;
	uint32_t needs_cause_ast_mask = 0x0;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (!sfi_is_enabled) {
		/* If SFI has been disabled, let all "on" timers drain naturally */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_NONE, 1, 0, 0, 0, 0);

		simple_unlock(&sfi_lock);
		splx(s);
		return;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* First set all configured classes into the off state, and program their "on" timer */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			uint64_t on_timer_deadline;

			sfi_classes[i].class_in_on_phase = FALSE;
			sfi_classes[i].on_timer_programmed = TRUE;

			/* Push out on-timer */
			on_timer_deadline = now + sfi_classes[i].off_time_interval;
			sfi_classes[i].on_timer_deadline = on_timer_deadline;

			timer_call_enter1(&sfi_classes[i].on_timer, NULL, on_timer_deadline, TIMER_CALL_SYS_CRITICAL);
		} else {
			/* If this class no longer needs SFI, make sure the timer is cancelled */
			sfi_classes[i].class_in_on_phase = TRUE;
			if (sfi_classes[i].on_timer_programmed) {
				sfi_classes[i].on_timer_programmed = FALSE;
				sfi_classes[i].on_timer_deadline = ~0ULL;
				timer_call_cancel(&sfi_classes[i].on_timer);
			}
		}
	}
	simple_unlock(&sfi_lock);

	/* Iterate over processors, call cause_ast_check() on ones running a thread that should be in an off phase */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);

	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		/* "processor" and its pset are locked */
		if (processor->state == PROCESSOR_RUNNING) {
			if (AST_NONE != sfi_processor_needs_ast(processor)) {
				needs_cause_ast_mask |= (1U << processor->cpu_id);
			}
		}
	} while ((processor = processor->processor_list) != NULL);

	pset_unlock(pset);

	for (int cpuid = lsb_first(needs_cause_ast_mask); cpuid >= 0; cpuid = lsb_next(needs_cause_ast_mask, cpuid)) {
		processor = processor_array[cpuid];
		if (processor == current_processor()) {
			ast_on(AST_SFI);
		} else {
			cause_ast_check(processor);
		}
	}

	/* Re-arm timer if still enabled */
	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (sfi_is_enabled) {
		clock_deadline_for_periodic_event(sfi_window_interval,
		    now,
		    &sfi_next_off_deadline);
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);

	splx(s);
}

static void
sfi_timer_per_class_on(
	timer_call_param_t param0,
	timer_call_param_t param1 __unused)
{
	sfi_class_id_t sfi_class_id = (sfi_class_id_t)(uintptr_t)param0;
	struct sfi_class_state *sfi_class = &sfi_classes[sfi_class_id];
	kern_return_t kret;
	spl_t s;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_START, sfi_class_id, 0, 0, 0, 0);

	/*
	 * Any threads that may have accumulated in the ready queue for this class should get re-enqueued.
	 * Since we have the sfi_lock held and have changed "class_in_on_phase", we expect
	 * no new threads to be put on this wait queue until the global "off timer" has fired.
	 */

	sfi_class->class_in_on_phase = TRUE;
	sfi_class->on_timer_programmed = FALSE;

	simple_unlock(&sfi_lock);

	/*
	 * Issue the wakeup outside the lock to reduce lock hold time
	 * rdar://problem/96463639
	 */

	kret = waitq_wakeup64_all(&sfi_class->waitq,
	    CAST_EVENT64_T(sfi_class_id),
	    THREAD_AWAKENED, WAITQ_WAKEUP_DEFAULT);
	assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	splx(s);
}


kern_return_t
sfi_set_window(uint64_t window_usecs)
{
	uint64_t interval, deadline;
	uint64_t now = mach_absolute_time();
	sfi_class_id_t i;
	spl_t s;
	uint64_t largest_class_off_interval = 0;

	if (window_usecs < MIN_SFI_WINDOW_USEC) {
		window_usecs = MIN_SFI_WINDOW_USEC;
	}

	if (window_usecs > UINT32_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_WINDOW), window_usecs, 0, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)window_usecs, NSEC_PER_USEC, &interval);
	deadline = now + interval;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	/* Check that we are not bringing in the SFI window smaller than any class */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			largest_class_off_interval = MAX(largest_class_off_interval, sfi_classes[i].off_time_interval);
		}
	}

	/*
	 * Off window must be strictly greater than all enabled classes,
	 * otherwise threads would build up on ready queue and never be able to run.
	 */
	if (interval <= largest_class_off_interval) {
		simple_unlock(&sfi_lock);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * If the new "off" deadline is further out than the current programmed timer,
	 * just let the current one expire (and the new cadence will be established thereafter).
	 * If the new "off" deadline is nearer than the current one, bring it in, so we
	 * can start the new behavior sooner. Note that this may cause the "off" timer to
	 * fire before some of the class "on" timers have fired.
	 */
	sfi_window_usecs = window_usecs;
	sfi_window_interval = interval;
	sfi_window_is_set = TRUE;

	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		/* Can't program timer yet */
	} else if (!sfi_is_enabled) {
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	} else if (deadline >= sfi_next_off_deadline) {
		sfi_next_off_deadline = deadline;
	} else {
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);
	splx(s);

	return KERN_SUCCESS;
}

kern_return_t
sfi_window_cancel(void)
{
	spl_t s;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_WINDOW), 0, 0, 0, 0, 0);

	/* Disable globals so that global "off-timer" is not re-armed */
	simple_lock(&sfi_lock, LCK_GRP_NULL);
	sfi_window_is_set = FALSE;
	sfi_window_usecs = 0;
	sfi_window_interval = 0;
	sfi_next_off_deadline = 0;
	sfi_is_enabled = FALSE;
	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

/* Defers SFI off and per-class on timers (if live) by the specified interval
 * in Mach Absolute Time Units. Currently invoked to align with the global
 * forced idle mechanism. Making some simplifying assumptions, the iterative GFI
 * induced SFI on+off deferrals form a geometric series that converges to yield
 * an effective SFI duty cycle that is scaled by the GFI duty cycle. Initial phase
 * alignment and congruency of the SFI/GFI periods can distort this to some extent.
 */
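/*
 * Note that the global off deadline and any armed per-class on deadlines are
 * all pushed out by the same amount, so the relative phase of the "off" and
 * "on" timers within the current window is preserved across a deferral.
 */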

kern_return_t
sfi_defer(uint64_t sfi_defer_matus)
{
	spl_t s;
	kern_return_t kr = KERN_FAILURE;
	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_GLOBAL_DEFER), sfi_defer_matus, 0, 0, 0, 0);

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (!sfi_is_enabled) {
		goto sfi_defer_done;
	}

	assert(sfi_next_off_deadline != 0);

	sfi_next_off_deadline += sfi_defer_matus;
	timer_call_enter1(&sfi_timer_call_entry, NULL, sfi_next_off_deadline, TIMER_CALL_SYS_CRITICAL);

	int i;
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			if (sfi_classes[i].on_timer_programmed) {
				uint64_t new_on_deadline = sfi_classes[i].on_timer_deadline + sfi_defer_matus;
				sfi_classes[i].on_timer_deadline = new_on_deadline;
				timer_call_enter1(&sfi_classes[i].on_timer, NULL, new_on_deadline, TIMER_CALL_SYS_CRITICAL);
			}
		}
	}

	kr = KERN_SUCCESS;
sfi_defer_done:
	simple_unlock(&sfi_lock);

	splx(s);

	return kr;
}


kern_return_t
sfi_get_window(uint64_t *window_usecs)
{
	spl_t s;
	uint64_t off_window_us;

	s = splsched();
	simple_lock(&sfi_lock, LCK_GRP_NULL);

	off_window_us = sfi_window_usecs;

	simple_unlock(&sfi_lock);
	splx(s);

	*window_usecs = off_window_us;

	return KERN_SUCCESS;
}


kern_return_t
sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs)
{
	uint64_t interval;
	spl_t s;
	uint64_t off_window_interval;

	if (offtime_usecs < MIN_SFI_WINDOW_USEC) {
		offtime_usecs = MIN_SFI_WINDOW_USEC;
	}

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return KERN_INVALID_ARGUMENT;
	}

	if (offtime_usecs > UINT32_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_CLASS_OFFTIME), offtime_usecs, class_id, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)offtime_usecs, NSEC_PER_USEC, &interval);

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	off_window_interval = sfi_window_interval;

	/* Check that we are not bringing in class off-time larger than the SFI window */
	if (off_window_interval && (interval >= off_window_interval)) {
		simple_unlock(&sfi_lock);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (!sfi_classes[class_id].class_sfi_is_enabled) {
		os_atomic_inc(&sfi_enabled_class_count, relaxed);
	}
	sfi_classes[class_id].off_time_usecs = offtime_usecs;
	sfi_classes[class_id].off_time_interval = interval;
	sfi_classes[class_id].class_sfi_is_enabled = TRUE;

	if (sfi_window_is_set && !sfi_is_enabled) {
		/* start global off timer */
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = mach_absolute_time() + sfi_window_interval;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

kern_return_t
sfi_class_offtime_cancel(sfi_class_id_t class_id)
{
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_CLASS_OFFTIME), class_id, 0, 0, 0, 0);

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (sfi_classes[class_id].class_sfi_is_enabled) {
		os_atomic_dec(&sfi_enabled_class_count, relaxed);
	}
	sfi_classes[class_id].off_time_usecs = 0;
	sfi_classes[class_id].off_time_interval = 0;
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;

	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		sfi_is_enabled = FALSE;
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

kern_return_t
sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs)
{
	uint64_t off_time_us;
	spl_t s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return 0;
	}

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	off_time_us = sfi_classes[class_id].off_time_usecs;
	simple_unlock(&sfi_lock);

	splx(s);

	*offtime_usecs = off_time_us;

	return KERN_SUCCESS;
}

/*
 * sfi_thread_classify and sfi_processor_active_thread_classify perform the critical
 * role of quickly categorizing a thread into its SFI class so that an AST_SFI can be
 * set. As the thread is unwinding to userspace, sfi_ast() performs full locking
 * and determines whether the thread should enter an SFI wait state. Because of
 * the inherent races between the time the AST is set and when it is evaluated,
 * thread classification can be inaccurate (but should always be safe). This is
 * especially the case for sfi_processor_active_thread_classify, which must
 * classify the active thread on a remote processor without taking the thread lock.
 * When in doubt, classification should err on the side of *not* classifying a
 * thread at all, and wait for the thread itself to either hit a quantum expiration
 * or block inside the kernel.
 */

/*
 * Thread must be locked. Ultimately, the real decision to enter
 * SFI wait happens at the AST boundary.
 */
sfi_class_id_t
sfi_thread_classify(thread_t thread)
{
	task_t task = get_threadtask(thread);
	boolean_t is_kernel_thread = (task == kernel_task);
	sched_mode_t thmode = thread->sched_mode;
	boolean_t focal = FALSE;

	/* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	/* no need to re-classify threads unless there is at least one enabled SFI class */
	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		return SFI_CLASS_OPTED_OUT;
	}

	int task_role = proc_get_effective_task_policy(task, TASK_POLICY_ROLE);
	int latency_qos = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);
	int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);

	int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
	int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);

	if (thread_qos == THREAD_QOS_MAINTENANCE) {
		return SFI_CLASS_MAINTENANCE;
	}

	if (thread_bg || thread_qos == THREAD_QOS_BACKGROUND) {
		return SFI_CLASS_DARWIN_BG;
	}

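	/*
	 * A latency_qos of 0 means no latency QoS tier is set on the task;
	 * nonzero values appear to encode "tier + 1", so the check below matches
	 * the two deepest latency QoS tiers (4 and 5), which are mapped to the
	 * App Nap SFI class.
	 */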
	if (latency_qos != 0) {
		int latency_qos_wtf = latency_qos - 1;

		if ((latency_qos_wtf >= 4) && (latency_qos_wtf <= 5)) {
			return SFI_CLASS_APP_NAP;
		}
	}

	/*
	 * Realtime and fixed priority threads express their duty cycle constraints
	 * via other mechanisms, and are opted out of (most) forms of SFI
	 */
	if (thmode == TH_MODE_REALTIME || thmode == TH_MODE_FIXED || task_role == TASK_GRAPHICS_SERVER) {
		return SFI_CLASS_OPTED_OUT;
	}

	/*
	 * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
	 */
	switch (task_role) {
	case TASK_CONTROL_APPLICATION:
	case TASK_FOREGROUND_APPLICATION:
		focal = TRUE;
		break;
	case TASK_BACKGROUND_APPLICATION:
	case TASK_DEFAULT_APPLICATION:
	case TASK_UNSPECIFIED:
		/* Focal if the task is in a coalition with a FG/focal app */
		if (task_coalition_focal_count(task) > 0) {
			focal = TRUE;
		}
		break;
	case TASK_THROTTLE_APPLICATION:
	case TASK_DARWINBG_APPLICATION:
	case TASK_NONUI_APPLICATION:
		/* Definitely not focal */
	default:
		break;
	}

	if (managed_task) {
		switch (thread_qos) {
		case THREAD_QOS_UNSPECIFIED:
		case THREAD_QOS_LEGACY:
		case THREAD_QOS_USER_INITIATED:
			if (focal) {
				return SFI_CLASS_MANAGED_FOCAL;
			} else {
				return SFI_CLASS_MANAGED_NONFOCAL;
			}
		default:
			break;
		}
	}

	if (thread_qos == THREAD_QOS_UTILITY) {
		return SFI_CLASS_UTILITY;
	}

	/*
	 * Classify threads in non-managed tasks
	 */
	if (focal) {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_FOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_FOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_FOCAL;
		default:
			return SFI_CLASS_DEFAULT_FOCAL;
		}
	} else {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_NONFOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_NONFOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_NONFOCAL;
		default:
			return SFI_CLASS_DEFAULT_NONFOCAL;
		}
	}
}

/*
 * pset must be locked.
 */
sfi_class_id_t
sfi_processor_active_thread_classify(processor_t processor)
{
	return processor->current_sfi_class;
}

/*
 * thread must be locked. This is inherently racy, with the intent that
 * at the AST boundary, it will be fully evaluated whether we need to
 * perform an AST wait
 */
ast_t
sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class)
{
	sfi_class_id_t class_id;

	class_id = sfi_thread_classify(thread);

	if (out_class) {
		*out_class = class_id;
	}

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase) {
		return AST_SFI;
	} else {
		return AST_NONE;
	}
}

/*
 * pset must be locked. We take the SFI class for
 * the currently running thread which is cached on
 * the processor_t, and assume it is accurate. In the
 * worst case, the processor will get an IPI and be asked
 * to evaluate if the current running thread at that
 * later point in time should be in an SFI wait.
 */
ast_t
sfi_processor_needs_ast(processor_t processor)
{
	sfi_class_id_t class_id;

	class_id = sfi_processor_active_thread_classify(processor);

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase) {
		return AST_SFI;
	} else {
		return AST_NONE;
	}
}

static inline void
_sfi_wait_cleanup(void)
{
	thread_t self = current_thread();

	spl_t s = splsched();
	simple_lock(&sfi_lock, LCK_GRP_NULL);

	sfi_class_id_t current_sfi_wait_class = self->sfi_wait_class;

	assert((SFI_CLASS_UNSPECIFIED < current_sfi_wait_class) &&
	    (current_sfi_wait_class < MAX_SFI_CLASS_ID));

	self->sfi_wait_class = SFI_CLASS_UNSPECIFIED;

	simple_unlock(&sfi_lock);
	splx(s);

	/*
	 * It's possible for the thread to be woken up due to the SFI period
	 * ending *before* it finishes blocking. In that case,
	 * wait_sfi_begin_time won't be set.
	 *
	 * Derive the time sacrificed to SFI by looking at when this thread was
	 * awoken by the on-timer, to avoid counting the time this thread spent
	 * waiting to get scheduled.
	 *
	 * Note that last_made_runnable_time could be reset if this thread
	 * gets preempted before we read the value. To fix that, we'd need to
	 * track wait time in a thread timer, sample the timer before blocking,
	 * pass the value through thread->parameter, and subtract that.
	 */

	if (self->wait_sfi_begin_time != 0) {
		uint64_t made_runnable = os_atomic_load(&self->last_made_runnable_time, relaxed);
		int64_t sfi_wait_time = made_runnable - self->wait_sfi_begin_time;
		assert(sfi_wait_time >= 0);

		ledger_credit(get_threadtask(self)->ledger,
		    task_ledgers.sfi_wait_times[current_sfi_wait_class],
		    sfi_wait_time);

		self->wait_sfi_begin_time = 0;
	}
}

/*
 * Called at AST context to fully evaluate if the current thread
 * (which is obviously running) should instead block in an SFI wait.
 * We must take the sfi_lock to check whether we are in the "off" period
 * for the class, and if so, block.
 */
void
sfi_ast(thread_t thread)
{
	sfi_class_id_t class_id;
	spl_t s;
	struct sfi_class_state *sfi_class;
	wait_result_t waitret;
	boolean_t did_wait = FALSE;
	thread_continue_t continuation;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	if (!sfi_is_enabled) {
		/*
		 * SFI is not enabled, or has recently been disabled.
		 * There is no point putting this thread on a deferred ready
		 * queue, even if it were classified as needing it, since
		 * SFI will truly be off at the next global off timer
		 */
		simple_unlock(&sfi_lock);
		splx(s);

		return;
	}

	thread_lock(thread);
	thread->sfi_class = class_id = sfi_thread_classify(thread);
	thread_unlock(thread);

	/*
	 * Once the sfi_lock is taken and the thread's ->sfi_class field is updated, we
	 * are committed to transitioning to whatever state is indicated by "->class_in_on_phase".
	 * If another thread tries to call sfi_reevaluate() after this point, it will take the
	 * sfi_lock and see the thread in this wait state. If another thread calls
	 * sfi_reevaluate() before this point, it would see a runnable thread and at most
	 * attempt to send an AST to this processor, but we would have the most accurate
	 * classification.
	 */

	sfi_class = &sfi_classes[class_id];
	if (!sfi_class->class_in_on_phase) {
		/* Need to block thread in wait queue */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_THREAD_DEFER),
		    thread_tid(thread), class_id, 0, 0, 0);

		waitret = waitq_assert_wait64(&sfi_class->waitq,
		    CAST_EVENT64_T(class_id),
		    THREAD_INTERRUPTIBLE | THREAD_WAIT_NOREPORT, 0);
		if (waitret == THREAD_WAITING) {
			thread->sfi_wait_class = class_id;
			did_wait = TRUE;
			continuation = sfi_class->continuation;
		} else {
			/* thread may be exiting already, all other errors are unexpected */
			assert(waitret == THREAD_INTERRUPTED);
		}
	}
	simple_unlock(&sfi_lock);

	splx(s);

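	/*
	 * If we committed to waiting, block with the class continuation: when the
	 * per-class on-timer (or sfi_reevaluate()) wakes the thread, it resumes in
	 * SFI_<class>_THREAD_IS_WAITING, which runs _sfi_wait_cleanup() to credit
	 * the ledger and then returns to user space via thread_exception_return().
	 */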
	if (did_wait) {
		assert(thread->wait_sfi_begin_time == 0);

		thread_block_reason(continuation, NULL, AST_SFI);
	}
}

/* Thread must be unlocked */
void
sfi_reevaluate(thread_t thread)
{
	kern_return_t kret;
	spl_t s;
	sfi_class_id_t class_id, current_class_id;
	ast_t sfi_ast;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	thread_lock(thread);
	sfi_ast = sfi_thread_needs_ast(thread, &class_id);
	thread->sfi_class = class_id;

	/*
	 * This routine chiefly exists to boost threads out of an SFI wait
	 * if their classification changes before the "on" timer fires.
	 *
	 * If we calculate that a thread is in a different ->sfi_wait_class
	 * than we think it should be (including no-SFI-wait), we need to
	 * correct that:
	 *
	 * If the thread is in SFI wait and should not be (or should be waiting
	 * on a different class' "on" timer), we wake it up. If needed, the
	 * thread may immediately block again in the different SFI wait state.
	 *
	 * If the thread is not in an SFI wait state and it should be, we need
	 * to get that thread's attention, possibly by sending an AST to another
	 * processor.
	 */

	if ((current_class_id = thread->sfi_wait_class) != SFI_CLASS_UNSPECIFIED) {
		thread_unlock(thread); /* not needed anymore */

		assert(current_class_id < MAX_SFI_CLASS_ID);

		if ((sfi_ast == AST_NONE) || (class_id != current_class_id)) {
			struct sfi_class_state *sfi_class = &sfi_classes[current_class_id];

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_WAIT_CANCELED), thread_tid(thread), current_class_id, class_id, 0, 0);

			kret = waitq_wakeup64_thread(&sfi_class->waitq,
			    CAST_EVENT64_T(current_class_id),
			    thread,
			    THREAD_AWAKENED);
			assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);
		}
	} else {
		/*
		 * Thread's current SFI wait class is not set, and because we
		 * have the sfi_lock, it won't get set.
		 */

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			if (sfi_ast != AST_NONE) {
				if (thread == current_thread()) {
					ast_on(sfi_ast);
				} else {
					processor_t processor = thread->last_processor;

					if (processor != PROCESSOR_NULL &&
					    processor->state == PROCESSOR_RUNNING &&
					    processor->active_thread == thread) {
						cause_ast_check(processor);
					} else {
						/*
						 * Runnable thread that's not on a CPU currently. When a processor
						 * does context switch to it, the AST will get set based on whether
						 * the thread is in its "off time".
						 */
					}
				}
			}
		}

		thread_unlock(thread);
	}

	simple_unlock(&sfi_lock);
	splx(s);
}

#else /* !CONFIG_SCHED_SFI */

kern_return_t
sfi_set_window(uint64_t window_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_window_cancel(void)
{
	return KERN_NOT_SUPPORTED;
}


kern_return_t
sfi_get_window(uint64_t *window_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}


kern_return_t
sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_class_offtime_cancel(sfi_class_id_t class_id __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

void
sfi_reevaluate(thread_t thread __unused)
{
	return;
}

sfi_class_id_t
sfi_thread_classify(thread_t thread)
{
	task_t task = get_threadtask(thread);
	boolean_t is_kernel_thread = (task == kernel_task);

	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	return SFI_CLASS_OPTED_OUT;
}

#endif /* !CONFIG_SCHED_SFI */
