/* xref: /xnu-12377.41.6/osfmk/kern/sfi.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828) */
/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/debug.h>
#include <kern/startup.h>
#include <kern/host.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/simple_lock.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sfi.h>
#include <kern/timer_call.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <machine/atomic.h>

#include <pexpert/pexpert.h>

#include <libkern/kernel_mach_header.h>

#include <sys/kdebug.h>
#if CONFIG_SCHED_SFI

#define SFI_DEBUG 0

#if SFI_DEBUG
#define dprintf(...) kprintf(__VA_ARGS__)
#else
#define dprintf(...) do { } while (0)
#endif

/*
 * SFI (Selective Forced Idle) operates by enabling a global
 * timer on the SFI window interval. When it fires, all processors
 * running a thread that should be SFI-ed are sent an AST.
 * As threads become runnable while in their "off phase", they
 * are placed on a deferred ready queue. When a per-class
 * "on timer" fires, the ready threads for that class are
 * re-enqueued for running. As an optimization to avoid spurious
 * wakeups, the timer may be lazily programmed.
 */
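
/*
 * Illustrative timeline (hypothetical example values): with a 100ms window
 * and a 20ms class off time, each window looks roughly like:
 *
 *    global "off" timer fires    class "on" timer fires     next "off" timer
 *    |<------- off phase ------->|<--------- on phase --------->|
 *    0ms                        20ms                          100ms
 *
 * During the off phase, threads of that class that reach the AST boundary
 * block on the class waitq; the per-class "on" timer wakes them all at once.
 */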

/*
 * The "sfi_lock" simple lock guards access to static configuration
 * parameters (as specified by userspace), dynamic state changes
 * (as updated by the timer event routine), and timer data structures.
 * Since it can be taken with interrupts disabled in some paths, it
 * must always be acquired with interrupts disabled at splsched(). The
 * "sfi_lock" also guards the "sfi_wait_class" field of thread_t, which
 * must only be accessed with the lock held.
 *
 * When an "on timer" fires, we must deterministically be able to drain
 * the wait queue, since if any threads are added to the queue afterwards,
 * they may never get woken out of SFI wait. So sfi_lock must be
 * taken before the wait queue's own spinlock.
 *
 * The wait queue will take the thread's scheduling lock. We may also take
 * the thread_lock directly to update the "sfi_class" field and determine
 * if the thread should block in the wait queue, but the lock will be
 * released before doing so.
 *
 * The pset lock may also be taken, but not while any other locks are held.
 *
 * The task and thread mutex may also be held while reevaluating sfi state.
 *
 * splsched ---> sfi_lock ---> waitq ---> thread_lock
 *        \  \              \__ thread_lock (*)
 *         \  \__ pset_lock
 *          \
 *           \__ thread_lock
 */
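
/*
 * A minimal sketch of the canonical acquisition pattern used throughout
 * this file (compare, e.g., sfi_set_window() below):
 *
 *	spl_t s = splsched();
 *	simple_lock(&sfi_lock, LCK_GRP_NULL);
 *	... examine or update SFI configuration and timer state ...
 *	simple_unlock(&sfi_lock);
 *	splx(s);
 */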

decl_simple_lock_data(static, sfi_lock);
static timer_call_data_t        sfi_timer_call_entry;
volatile boolean_t      sfi_is_enabled;

boolean_t sfi_window_is_set;
uint64_t sfi_window_usecs;
uint64_t sfi_window_interval;
uint64_t sfi_next_off_deadline;

typedef struct {
	sfi_class_id_t  class_id;
	thread_continue_t       class_continuation;
	const char *    class_name;
	const char *    class_ledger_name;
} sfi_class_registration_t;

/*
 * To add a new SFI class:
 *
 * 1) Raise MAX_SFI_CLASS_ID in mach/sfi_class.h
 * 2) Add a #define for it to mach/sfi_class.h. It need not be inserted in order of restrictiveness.
 * 3) Add a call to SFI_CLASS_REGISTER below (see the hypothetical example following this comment)
 * 4) Augment sfi_thread_classify to categorize threads as early, and as restrictively, as possible.
 * 5) Modify thermald to use the SFI class
 */
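
/*
 * For illustration only (hypothetical class and ledger names), step 3
 * above would look like:
 *
 *	SFI_CLASS_REGISTER(MY_NEW_CLASS, MY_NEW_LEDGER);
 *
 * assuming SFI_CLASS_MY_NEW_CLASS has been added to mach/sfi_class.h.
 */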

static inline void _sfi_wait_cleanup(void);

static void sfi_class_register(sfi_class_registration_t *);

#define SFI_CLASS_REGISTER(clsid, ledger_name)                                  \
                                                                                \
static void __attribute__((noinline, noreturn))                                 \
SFI_ ## clsid ## _THREAD_IS_WAITING(void *arg __unused, wait_result_t wret __unused) \
{                                                                               \
	_sfi_wait_cleanup();                                                    \
	thread_exception_return();                                              \
}                                                                               \
                                                                                \
static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID");            \
                                                                                \
static __startup_data sfi_class_registration_t                                  \
SFI_ ## clsid ## _registration = {                                              \
	.class_id = SFI_CLASS_ ## clsid,                                        \
	.class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING,              \
	.class_name = "SFI_CLASS_" # clsid,                                     \
	.class_ledger_name = "SFI_CLASS_" # ledger_name,                        \
};                                                                              \
STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE,                                      \
    sfi_class_register, &SFI_ ## clsid ## _registration)
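
/*
 * Simplified sketch of what SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE)
 * below expands to: a noreturn continuation that runs when a thread wakes
 * from SFI wait, plus a registration record hooked up at startup:
 *
 *	static void __attribute__((noinline, noreturn))
 *	SFI_MAINTENANCE_THREAD_IS_WAITING(void *arg, wait_result_t wret)
 *	{
 *		_sfi_wait_cleanup();
 *		thread_exception_return();
 *	}
 *
 *	static __startup_data sfi_class_registration_t
 *	SFI_MAINTENANCE_registration = {
 *		.class_id = SFI_CLASS_MAINTENANCE,
 *		.class_continuation = SFI_MAINTENANCE_THREAD_IS_WAITING,
 *		.class_name = "SFI_CLASS_MAINTENANCE",
 *		.class_ledger_name = "SFI_CLASS_MAINTENANCE",
 *	};
 */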

/* SFI_CLASS_UNSPECIFIED not included here */
SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE);
SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG);
SFI_CLASS_REGISTER(APP_NAP, APP_NAP);
SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED);
SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED);
SFI_CLASS_REGISTER(UTILITY, UTILITY);
SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT);
SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT);
SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY);
SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY);
SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED);
SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED);
SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE);
SFI_CLASS_REGISTER(USER_INTERACTIVE_NONFOCAL, USER_INTERACTIVE);
SFI_CLASS_REGISTER(KERNEL, OPTED_OUT);
SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT);
SFI_CLASS_REGISTER(RUNAWAY_MITIGATION, RUNAWAY_MITIGATION);

struct sfi_class_state {
	uint64_t        off_time_usecs;
	uint64_t        off_time_interval;

	thread_call_t   on_timer;
	uint64_t        on_timer_deadline;
	boolean_t       on_timer_programmed;

	boolean_t       class_sfi_is_enabled;
	volatile boolean_t      class_in_on_phase;

	struct waitq    waitq;  /* threads in ready state */
	thread_continue_t       continuation;

	const char *    class_name;
	const char *    class_ledger_name;
};

/* Static configuration performed in sfi_early_init() */
struct sfi_class_state sfi_classes[MAX_SFI_CLASS_ID];

int sfi_enabled_class_count; // protected by sfi_lock and used atomically

static void sfi_timer_global_off(
	timer_call_param_t      param0,
	timer_call_param_t      param1);

static void sfi_timer_per_class_on(
	timer_call_param_t      param0,
	timer_call_param_t      param1);

/* Called early in boot, when kernel is single-threaded */
__startup_func
static void
sfi_class_register(sfi_class_registration_t *reg)
{
	sfi_class_id_t class_id = reg->class_id;

	if (class_id >= MAX_SFI_CLASS_ID) {
		panic("Invalid SFI class 0x%x", class_id);
	}
	if (sfi_classes[class_id].continuation != NULL) {
		panic("Duplicate SFI registration for class 0x%x", class_id);
	}
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;
	sfi_classes[class_id].class_in_on_phase = TRUE;
	sfi_classes[class_id].continuation = reg->class_continuation;
	sfi_classes[class_id].class_name = reg->class_name;
	sfi_classes[class_id].class_ledger_name = reg->class_ledger_name;
}

void
sfi_init(void)
{
	sfi_class_id_t i;

	simple_lock_init(&sfi_lock, 0);
	timer_call_setup(&sfi_timer_call_entry, sfi_timer_global_off, NULL);
	sfi_window_is_set = FALSE;
	os_atomic_init(&sfi_enabled_class_count, 0);
	sfi_is_enabled = FALSE;

	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		/* If the class was set up in sfi_early_init(), initialize remaining fields */
		if (sfi_classes[i].continuation) {
			sfi_classes[i].on_timer = thread_call_allocate_with_options(
				sfi_timer_per_class_on, (void *)(uintptr_t)i, THREAD_CALL_PRIORITY_HIGH,
				THREAD_CALL_OPTIONS_ONCE);
			sfi_classes[i].on_timer_programmed = FALSE;

			waitq_init(&sfi_classes[i].waitq, WQT_QUEUE, SYNC_POLICY_FIFO);
		} else {
			/* The only allowed gap is for SFI_CLASS_UNSPECIFIED */
			if (i != SFI_CLASS_UNSPECIFIED) {
				panic("Gap in registered SFI classes");
			}
		}
	}
}

/* Can be called before sfi_init() by task initialization, but after sfi_early_init() */
sfi_class_id_t
sfi_get_ledger_alias_for_class(sfi_class_id_t class_id)
{
	sfi_class_id_t i;
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	/* Find the first class in the registration table with this ledger name */
	if (ledger_name) {
		for (i = SFI_CLASS_UNSPECIFIED + 1; i < class_id; i++) {
			if (0 == strcmp(sfi_classes[i].class_ledger_name, ledger_name)) {
				dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, i);
				return i;
			}
		}

		/* This class is the primary one for the ledger, so there is no alias */
		dprintf("sfi_get_ledger_alias_for_class(0x%x) -> 0x%x\n", class_id, SFI_CLASS_UNSPECIFIED);
		return SFI_CLASS_UNSPECIFIED;
	}

	/* We are permissive on SFI class lookup failures. In sfi_init(), we assert more */
	return SFI_CLASS_UNSPECIFIED;
}
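
/*
 * Worked example (derived from the registration table above):
 * MANAGED_FOCAL and MANAGED_NONFOCAL share the ledger name
 * "SFI_CLASS_MANAGED". Assuming MANAGED_FOCAL has the lower class ID in
 * mach/sfi_class.h, it is the ledger's primary class, so:
 *
 *	sfi_get_ledger_alias_for_class(SFI_CLASS_MANAGED_NONFOCAL)
 *	    returns SFI_CLASS_MANAGED_FOCAL
 *	sfi_get_ledger_alias_for_class(SFI_CLASS_MANAGED_FOCAL)
 *	    returns SFI_CLASS_UNSPECIFIED (it is the primary; no alias)
 */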

int
sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id)
{
	const char *ledger_name = NULL;

	ledger_name = sfi_classes[class_id].class_ledger_name;

	dprintf("sfi_ledger_entry_add(%p, 0x%x) -> %s\n", template, class_id, ledger_name);
	return ledger_entry_add(template, ledger_name, "sfi", "MATUs");
}

static void
sfi_timer_global_off(
	timer_call_param_t      param0 __unused,
	timer_call_param_t      param1 __unused)
{
	uint64_t        now = mach_absolute_time();
	sfi_class_id_t  i;
	processor_set_t pset, nset;
	processor_t     processor;
	uint32_t        needs_cause_ast_mask = 0x0;
	spl_t           s;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (!sfi_is_enabled) {
		/* If SFI has been disabled, let all "on" timers drain naturally */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_NONE, 1, 0, 0, 0, 0);

		simple_unlock(&sfi_lock);
		splx(s);
		return;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* First set all configured classes into the off state, and program their "on" timer */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			uint64_t on_timer_deadline;

			sfi_classes[i].class_in_on_phase = FALSE;
			sfi_classes[i].on_timer_programmed = TRUE;

			/* Push out on-timer */
			on_timer_deadline = now + sfi_classes[i].off_time_interval;
			sfi_classes[i].on_timer_deadline = on_timer_deadline;

			thread_call_enter_delayed_with_leeway(sfi_classes[i].on_timer, NULL, on_timer_deadline, 0, THREAD_CALL_DELAY_SYS_CRITICAL);
		} else {
			/* If this class no longer needs SFI, make sure the timer is cancelled */
			sfi_classes[i].class_in_on_phase = TRUE;
			if (sfi_classes[i].on_timer_programmed) {
				sfi_classes[i].on_timer_programmed = FALSE;
				sfi_classes[i].on_timer_deadline = ~0ULL;
				thread_call_cancel(sfi_classes[i].on_timer);
			}
		}
	}
	simple_unlock(&sfi_lock);

	/* Iterate over processors, call cause_ast_check() on ones running a thread that should be in an off phase */
	processor = processor_list;
	pset = processor->processor_set;

	pset_lock(pset);

	do {
		nset = processor->processor_set;
		if (nset != pset) {
			pset_unlock(pset);
			pset = nset;
			pset_lock(pset);
		}

		/* "processor" and its pset are locked */
		if (processor->state == PROCESSOR_RUNNING) {
			if (AST_NONE != sfi_processor_needs_ast(processor)) {
				needs_cause_ast_mask |= (1U << processor->cpu_id);
			}
		}
	} while ((processor = processor->processor_list) != NULL);

	pset_unlock(pset);
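
	/*
	 * The ASTs are posted only after the pset lock is dropped: per the
	 * lock ordering comment above, the pset lock must not be held while
	 * taking other locks, and cause_ast_check() may interrupt a remote CPU.
	 */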

	for (int cpuid = lsb_first(needs_cause_ast_mask); cpuid >= 0; cpuid = lsb_next(needs_cause_ast_mask, cpuid)) {
		processor = processor_array[cpuid];
		if (processor == current_processor()) {
			ast_on(AST_SFI);
		} else {
			cause_ast_check(processor);
		}
	}

	/* Re-arm timer if still enabled */
	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (sfi_is_enabled) {
		clock_deadline_for_periodic_event(sfi_window_interval,
		    now,
		    &sfi_next_off_deadline);
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	simple_unlock(&sfi_lock);

	splx(s);
}

static void
sfi_timer_per_class_on(
	timer_call_param_t      param0,
	timer_call_param_t      param1 __unused)
{
	sfi_class_id_t sfi_class_id = (sfi_class_id_t)(uintptr_t)param0;
	struct sfi_class_state  *sfi_class = &sfi_classes[sfi_class_id];

	spl_t s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_START, sfi_class_id, 0, 0, 0, 0);

	/*
	 * Any threads that may have accumulated in the ready queue for this class should get re-enqueued.
	 * Since we have the sfi_lock held and have changed "class_in_on_phase", we expect
	 * no new threads to be put on this wait queue until the global "off timer" has fired.
	 */

	sfi_class->class_in_on_phase = TRUE;
	sfi_class->on_timer_programmed = FALSE;

	simple_unlock(&sfi_lock);

	/*
	 * Issue the wakeup outside the lock to reduce lock hold time
	 * rdar://problem/96463639
	 */
	__assert_only kern_return_t kret;

	kret = waitq_wakeup64_all(&sfi_class->waitq,
	    CAST_EVENT64_T(sfi_class_id),
	    THREAD_AWAKENED, waitq_flags_splx(s));
	assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}


kern_return_t
sfi_set_window(uint64_t window_usecs)
{
	uint64_t        interval, deadline;
	uint64_t        now = mach_absolute_time();
	sfi_class_id_t  i;
	spl_t           s;
	uint64_t        largest_class_off_interval = 0;

	if (window_usecs < MIN_SFI_WINDOW_USEC) {
		window_usecs = MIN_SFI_WINDOW_USEC;
	}

	if (window_usecs > UINT32_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_WINDOW), window_usecs, 0, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)window_usecs, NSEC_PER_USEC, &interval);
	deadline = now + interval;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	/* Check that we are not bringing in the SFI window smaller than any class */
	for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			largest_class_off_interval = MAX(largest_class_off_interval, sfi_classes[i].off_time_interval);
		}
	}

	/*
	 * The off window must be strictly greater than the off time of every
	 * enabled class; otherwise threads would build up on the ready queue
	 * and never get a chance to run.
	 */
	if (interval <= largest_class_off_interval) {
		simple_unlock(&sfi_lock);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * If the new "off" deadline is further out than the current programmed timer,
	 * just let the current one expire (and the new cadence will be established thereafter).
	 * If the new "off" deadline is nearer than the current one, bring it in, so we
	 * can start the new behavior sooner. Note that this may cause the "off" timer to
	 * fire before some of the class "on" timers have fired.
	 */
	sfi_window_usecs = window_usecs;
	sfi_window_interval = interval;
	sfi_window_is_set = TRUE;

	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		/* Can't program timer yet */
	} else if (!sfi_is_enabled) {
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	} else if (deadline >= sfi_next_off_deadline) {
		sfi_next_off_deadline = deadline;
	} else {
		sfi_next_off_deadline = deadline;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);
	splx(s);

	return KERN_SUCCESS;
}
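
/*
 * Example usage (illustrative values only): to force UTILITY threads idle
 * for 50ms out of every 100ms window,
 *
 *	sfi_set_class_offtime(SFI_CLASS_UTILITY, 50000);  // 50ms off time
 *	sfi_set_window(100000);                           // 100ms window
 *
 * sfi_set_window() fails with KERN_INVALID_ARGUMENT if the window would not
 * be strictly larger than the largest enabled class off time.
 */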

kern_return_t
sfi_window_cancel(void)
{
	spl_t           s;

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_WINDOW), 0, 0, 0, 0, 0);

	/* Disable globals so that global "off-timer" is not re-armed */
	simple_lock(&sfi_lock, LCK_GRP_NULL);
	sfi_window_is_set = FALSE;
	sfi_window_usecs = 0;
	sfi_window_interval = 0;
	sfi_next_off_deadline = 0;
	sfi_is_enabled = FALSE;
	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

/*
 * Defers the SFI "off" timer and any live per-class "on" timers by the
 * specified interval in Mach Absolute Time Units. Currently invoked to align
 * with the global forced idle (GFI) mechanism. Under some simplifying
 * assumptions, the iterative, GFI-induced on+off deferrals form a geometric
 * series that converges to yield an effective SFI duty cycle scaled by the
 * GFI duty cycle. Initial phase alignment and congruency of the SFI/GFI
 * periods can distort this to some extent.
 */
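
/*
 * Illustratively: sfi_defer(d) pushes the pending global "off" deadline and
 * every programmed per-class "on" deadline out by d MATUs, shifting the
 * whole SFI schedule later without changing any off-phase duration within
 * the current window.
 */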

kern_return_t
sfi_defer(uint64_t sfi_defer_matus)
{
	kern_return_t kr = KERN_FAILURE;
	spl_t s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_GLOBAL_DEFER), sfi_defer_matus, 0, 0, 0, 0);

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	if (!sfi_is_enabled) {
		goto sfi_defer_done;
	}

	assert(sfi_next_off_deadline != 0);

	sfi_next_off_deadline += sfi_defer_matus;
	timer_call_enter1(&sfi_timer_call_entry, NULL, sfi_next_off_deadline, TIMER_CALL_SYS_CRITICAL);

	for (int i = 0; i < MAX_SFI_CLASS_ID; i++) {
		if (sfi_classes[i].class_sfi_is_enabled) {
			if (sfi_classes[i].on_timer_programmed) {
				uint64_t new_on_deadline = sfi_classes[i].on_timer_deadline + sfi_defer_matus;
				sfi_classes[i].on_timer_deadline = new_on_deadline;
				thread_call_enter_delayed_with_leeway(sfi_classes[i].on_timer, NULL, new_on_deadline, 0, THREAD_CALL_DELAY_SYS_CRITICAL);
			}
		}
	}

	kr = KERN_SUCCESS;
sfi_defer_done:
	simple_unlock(&sfi_lock);

	splx(s);

	return kr;
}


kern_return_t
sfi_get_window(uint64_t *window_usecs)
{
	spl_t           s;
	uint64_t        off_window_us;

	s = splsched();
	simple_lock(&sfi_lock, LCK_GRP_NULL);

	off_window_us = sfi_window_usecs;

	simple_unlock(&sfi_lock);
	splx(s);

	*window_usecs = off_window_us;

	return KERN_SUCCESS;
}


kern_return_t
sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs)
{
	uint64_t        interval;
	spl_t           s;
	uint64_t        off_window_interval;

	if (offtime_usecs < MIN_SFI_WINDOW_USEC) {
		offtime_usecs = MIN_SFI_WINDOW_USEC;
	}

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return KERN_INVALID_ARGUMENT;
	}

	if (offtime_usecs > UINT32_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_CLASS_OFFTIME), offtime_usecs, class_id, 0, 0, 0);

	clock_interval_to_absolutetime_interval((uint32_t)offtime_usecs, NSEC_PER_USEC, &interval);

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	off_window_interval = sfi_window_interval;

	/* Check that we are not bringing in class off-time larger than the SFI window */
	if (off_window_interval && (interval >= off_window_interval)) {
		simple_unlock(&sfi_lock);
		splx(s);
		return KERN_INVALID_ARGUMENT;
	}

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (!sfi_classes[class_id].class_sfi_is_enabled) {
		os_atomic_inc(&sfi_enabled_class_count, relaxed);
	}
	sfi_classes[class_id].off_time_usecs = offtime_usecs;
	sfi_classes[class_id].off_time_interval = interval;
	sfi_classes[class_id].class_sfi_is_enabled = TRUE;

	if (sfi_window_is_set && !sfi_is_enabled) {
		/* start global off timer */
		sfi_is_enabled = TRUE;
		sfi_next_off_deadline = mach_absolute_time() + sfi_window_interval;
		timer_call_enter1(&sfi_timer_call_entry,
		    NULL,
		    sfi_next_off_deadline,
		    TIMER_CALL_SYS_CRITICAL);
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

kern_return_t
sfi_class_offtime_cancel(sfi_class_id_t class_id)
{
	spl_t           s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_CLASS_OFFTIME), class_id, 0, 0, 0, 0);

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	/* We never re-program the per-class on-timer, but rather just let it expire naturally */
	if (sfi_classes[class_id].class_sfi_is_enabled) {
		os_atomic_dec(&sfi_enabled_class_count, relaxed);
	}
	sfi_classes[class_id].off_time_usecs = 0;
	sfi_classes[class_id].off_time_interval = 0;
	sfi_classes[class_id].class_sfi_is_enabled = FALSE;

	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		sfi_is_enabled = FALSE;
	}

	simple_unlock(&sfi_lock);

	splx(s);

	return KERN_SUCCESS;
}

kern_return_t
sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs)
{
	uint64_t        off_time_us;
	spl_t           s;

	if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);
	off_time_us = sfi_classes[class_id].off_time_usecs;
	simple_unlock(&sfi_lock);

	splx(s);

	*offtime_usecs = off_time_us;

	return KERN_SUCCESS;
}

/*
 * sfi_thread_classify and sfi_processor_active_thread_classify perform the critical
 * role of quickly categorizing a thread into its SFI class so that an AST_SFI can be
 * set. As the thread is unwinding to userspace, sfi_ast() performs full locking
 * and determines whether the thread should enter an SFI wait state. Because of
 * the inherent races between the time the AST is set and when it is evaluated,
 * thread classification can be inaccurate (but should always be safe). This is
 * especially the case for sfi_processor_active_thread_classify, which must
 * classify the active thread on a remote processor without taking the thread lock.
 * When in doubt, classification should err on the side of *not* classifying a
 * thread at all, and wait for the thread itself to either hit a quantum expiration
 * or block inside the kernel.
 */
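
/*
 * Summary of the classification order implemented in sfi_thread_classify()
 * below (first match wins): kernel task; no enabled SFI classes (opted out);
 * maintenance QOS; background; App Nap latency tiers; realtime, fixed, or
 * graphics-server threads (opted out); SFI-managed tasks; utility QOS; then
 * the per-QOS focal vs. nonfocal buckets. Runaway mitigation overrides the
 * maintenance and background classes when the task policy requests it.
 */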

/*
 * Thread must be locked. Ultimately, the real decision to enter
 * SFI wait happens at the AST boundary.
 */
sfi_class_id_t
sfi_thread_classify(thread_t thread)
{
	task_t task = get_threadtask(thread);
	boolean_t is_kernel_thread = (task == kernel_task);
	sched_mode_t thmode = thread->sched_mode;
	boolean_t focal = FALSE;

	/* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	/* no need to re-classify threads unless there is at least one enabled SFI class */
	if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) {
		return SFI_CLASS_OPTED_OUT;
	}

	int task_role       = proc_get_effective_task_policy(task, TASK_POLICY_ROLE);
	int latency_qos     = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS);
	int managed_task    = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);
	int runaway_bg      = proc_get_effective_task_policy(task, TASK_POLICY_RUNAWAY_MITIGATION);

	int thread_qos      = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
	int thread_bg       = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);

	if (thread_qos == THREAD_QOS_MAINTENANCE) {
		return runaway_bg ? SFI_CLASS_RUNAWAY_MITIGATION : SFI_CLASS_MAINTENANCE;
	}

	if (thread_bg || thread_qos == THREAD_QOS_BACKGROUND) {
		return runaway_bg ? SFI_CLASS_RUNAWAY_MITIGATION : SFI_CLASS_DARWIN_BG;
	}

	if (latency_qos != 0) {
		int latency_qos_wtf = latency_qos - 1;
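		/*
		 * The effective latency QOS appears to be stored 1-based so
		 * that 0 can mean "unspecified"; subtracting 1 recovers the
		 * tier index. Per the check below, tiers 4 and 5 map to App
		 * Nap (an inference from this code, not documented here).
		 */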

		if ((latency_qos_wtf >= 4) && (latency_qos_wtf <= 5)) {
			return SFI_CLASS_APP_NAP;
		}
	}

	/*
	 * Realtime and fixed priority threads express their duty cycle constraints
	 * via other mechanisms, and are opted out of (most) forms of SFI
	 */
	if (thmode == TH_MODE_REALTIME || thmode == TH_MODE_FIXED || task_role == TASK_GRAPHICS_SERVER) {
		return SFI_CLASS_OPTED_OUT;
	}

	/*
	 * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
	 */
	switch (task_role) {
	case TASK_CONTROL_APPLICATION:
	case TASK_FOREGROUND_APPLICATION:
		focal = TRUE;
		break;
	case TASK_BACKGROUND_APPLICATION:
	case TASK_DEFAULT_APPLICATION:
	case TASK_UNSPECIFIED:
		/* Focal if the task is in a coalition with a FG/focal app */
		if (task_coalition_focal_count(task) > 0) {
			focal = TRUE;
		}
		break;
	case TASK_THROTTLE_APPLICATION:
	case TASK_DARWINBG_APPLICATION:
	case TASK_NONUI_APPLICATION:
	/* Definitely not focal */
	default:
		break;
	}

	if (managed_task) {
		switch (thread_qos) {
		case THREAD_QOS_UNSPECIFIED:
		case THREAD_QOS_LEGACY:
		case THREAD_QOS_USER_INITIATED:
			if (focal) {
				return SFI_CLASS_MANAGED_FOCAL;
			} else {
				return SFI_CLASS_MANAGED_NONFOCAL;
			}
		default:
			break;
		}
	}

	if (thread_qos == THREAD_QOS_UTILITY) {
		return SFI_CLASS_UTILITY;
	}

	/*
	 * Classify threads in non-managed tasks
	 */
	if (focal) {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_FOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_FOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_FOCAL;
		default:
			return SFI_CLASS_DEFAULT_FOCAL;
		}
	} else {
		switch (thread_qos) {
		case THREAD_QOS_USER_INTERACTIVE:
			return SFI_CLASS_USER_INTERACTIVE_NONFOCAL;
		case THREAD_QOS_USER_INITIATED:
			return SFI_CLASS_USER_INITIATED_NONFOCAL;
		case THREAD_QOS_LEGACY:
			return SFI_CLASS_LEGACY_NONFOCAL;
		default:
			return SFI_CLASS_DEFAULT_NONFOCAL;
		}
	}
}

/*
 * pset must be locked.
 */
sfi_class_id_t
sfi_processor_active_thread_classify(processor_t processor)
{
	return processor->current_sfi_class;
}

/*
 * thread must be locked. This is inherently racy, with the intent that
 * at the AST boundary, it will be fully evaluated whether we need to
 * perform an AST wait
 */
ast_t
sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class)
{
	sfi_class_id_t class_id;

	class_id = sfi_thread_classify(thread);

	if (out_class) {
		*out_class = class_id;
	}

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase) {
		return AST_SFI;
	} else {
		return AST_NONE;
	}
}

/*
 * pset must be locked. We take the SFI class for
 * the currently running thread which is cached on
 * the processor_t, and assume it is accurate. In the
 * worst case, the processor will get an IPI and be asked
 * to evaluate if the current running thread at that
 * later point in time should be in an SFI wait.
 */
ast_t
sfi_processor_needs_ast(processor_t processor)
{
	sfi_class_id_t class_id;

	class_id = sfi_processor_active_thread_classify(processor);

	/* No lock taken, so a stale value may be used. */
	if (!sfi_classes[class_id].class_in_on_phase) {
		return AST_SFI;
	} else {
		return AST_NONE;
	}
}

static inline void
_sfi_wait_cleanup(void)
{
	thread_t self = current_thread();

	spl_t s = splsched();
	simple_lock(&sfi_lock, LCK_GRP_NULL);

	sfi_class_id_t current_sfi_wait_class = self->sfi_wait_class;

	assert((SFI_CLASS_UNSPECIFIED < current_sfi_wait_class) &&
	    (current_sfi_wait_class < MAX_SFI_CLASS_ID));

	self->sfi_wait_class = SFI_CLASS_UNSPECIFIED;

	simple_unlock(&sfi_lock);
	splx(s);

	/*
	 * It's possible for the thread to be woken up due to the SFI period
	 * ending *before* it finishes blocking. In that case,
	 * wait_sfi_begin_time won't be set.
	 *
	 * Derive the time sacrificed to SFI by looking at when this thread was
	 * awoken by the on-timer, to avoid counting the time this thread spent
	 * waiting to get scheduled.
	 *
	 * Note that last_made_runnable_time could be reset if this thread
	 * gets preempted before we read the value. To fix that, we'd need to
	 * track wait time in a thread timer, sample the timer before blocking,
	 * pass the value through thread->parameter, and subtract that.
	 */

	if (self->wait_sfi_begin_time != 0) {
		uint64_t made_runnable = os_atomic_load(&self->last_made_runnable_time, relaxed);
		int64_t sfi_wait_time = made_runnable - self->wait_sfi_begin_time;
		assert(sfi_wait_time >= 0);

		ledger_credit(get_threadtask(self)->ledger,
		    task_ledgers.sfi_wait_times[current_sfi_wait_class],
		    sfi_wait_time);

		self->wait_sfi_begin_time = 0;
	}
}

/*
 * Called at AST context to fully evaluate if the current thread
 * (which is obviously running) should instead block in an SFI wait.
 * We must take the sfi_lock to check whether we are in the "off" period
 * for the class, and if so, block.
 */
void
sfi_ast(thread_t thread)
{
	sfi_class_id_t class_id;
	spl_t           s;
	struct sfi_class_state  *sfi_class;
	wait_result_t   waitret;
	boolean_t       did_wait = FALSE;
	thread_continue_t       continuation;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	if (!sfi_is_enabled) {
		/*
		 * SFI is not enabled, or has recently been disabled.
		 * There is no point putting this thread on a deferred ready
		 * queue, even if it were classified as needing it, since
		 * SFI will truly be off at the next global off timer
		 */
		simple_unlock(&sfi_lock);
		splx(s);

		return;
	}

	thread_lock(thread);
	thread->sfi_class = class_id = sfi_thread_classify(thread);
	thread_unlock(thread);

	/*
	 * Once the sfi_lock is taken and the thread's ->sfi_class field is updated, we
	 * are committed to transitioning to whatever state is indicated by "->class_in_on_phase".
	 * If another thread tries to call sfi_reevaluate() after this point, it will take the
	 * sfi_lock and see the thread in this wait state. If another thread calls
	 * sfi_reevaluate() before this point, it would see a runnable thread and at most
	 * attempt to send an AST to this processor, but we would have the most accurate
	 * classification.
	 */

	sfi_class = &sfi_classes[class_id];
	if (!sfi_class->class_in_on_phase) {
		/* Need to block thread in wait queue */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_THREAD_DEFER),
		    thread_tid(thread), class_id, 0, 0, 0);

		waitret = waitq_assert_wait64(&sfi_class->waitq,
		    CAST_EVENT64_T(class_id),
		    THREAD_INTERRUPTIBLE | THREAD_WAIT_NOREPORT, 0);
		if (waitret == THREAD_WAITING) {
			thread->sfi_wait_class = class_id;
			did_wait = TRUE;
			continuation = sfi_class->continuation;
		} else {
			/* thread may be exiting already, all other errors are unexpected */
			assert(waitret == THREAD_INTERRUPTED);
		}
	}
	simple_unlock(&sfi_lock);

	splx(s);

	if (did_wait) {
		assert(thread->wait_sfi_begin_time == 0);

		thread_block_reason(continuation, NULL, AST_SFI);
	}
}

/* Thread must be unlocked */
void
sfi_reevaluate(thread_t thread)
{
	kern_return_t kret;
	spl_t           s;
	sfi_class_id_t class_id, current_class_id;
	ast_t           sfi_ast;

	s = splsched();

	simple_lock(&sfi_lock, LCK_GRP_NULL);

	thread_lock(thread);
	sfi_ast = sfi_thread_needs_ast(thread, &class_id);
	thread->sfi_class = class_id;

	/*
	 * This routine chiefly exists to boost threads out of an SFI wait
	 * if their classification changes before the "on" timer fires.
	 *
	 * If we calculate that a thread is in a different ->sfi_wait_class
	 * than we think it should be (including no-SFI-wait), we need to
	 * correct that:
	 *
	 * If the thread is in SFI wait and should not be (or should be waiting
	 * on a different class' "on" timer), we wake it up. If needed, the
	 * thread may immediately block again in the different SFI wait state.
	 *
	 * If the thread is not in an SFI wait state and it should be, we need
	 * to get that thread's attention, possibly by sending an AST to another
	 * processor.
	 */

	if ((current_class_id = thread->sfi_wait_class) != SFI_CLASS_UNSPECIFIED) {
		thread_unlock(thread); /* not needed anymore */

		assert(current_class_id < MAX_SFI_CLASS_ID);

		if ((sfi_ast == AST_NONE) || (class_id != current_class_id)) {
			struct sfi_class_state  *sfi_class = &sfi_classes[current_class_id];

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_WAIT_CANCELED), thread_tid(thread), current_class_id, class_id, 0, 0);

			kret = waitq_wakeup64_thread(&sfi_class->waitq,
			    CAST_EVENT64_T(current_class_id),
			    thread,
			    THREAD_AWAKENED);
			assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING);
		}
	} else {
		/*
		 * Thread's current SFI wait class is not set, and because we
		 * have the sfi_lock, it won't get set.
		 */

		if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) {
			if (sfi_ast != AST_NONE) {
				if (thread == current_thread()) {
					ast_on(sfi_ast);
				} else {
					processor_t             processor = thread->last_processor;

					if (processor != PROCESSOR_NULL &&
					    processor->state == PROCESSOR_RUNNING &&
					    processor->active_thread == thread) {
						cause_ast_check(processor);
					} else {
						/*
						 * Runnable thread that's not on a CPU currently. When a processor
						 * does context switch to it, the AST will get set based on whether
						 * the thread is in its "off time".
						 */
					}
				}
			}
		}

		thread_unlock(thread);
	}

	simple_unlock(&sfi_lock);
	splx(s);
}

#else /* !CONFIG_SCHED_SFI */

kern_return_t
sfi_set_window(uint64_t window_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_window_cancel(void)
{
	return KERN_NOT_SUPPORTED;
}


kern_return_t
sfi_get_window(uint64_t *window_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}


kern_return_t
sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_class_offtime_cancel(sfi_class_id_t class_id __unused)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused)
{
	return KERN_NOT_SUPPORTED;
}

void
sfi_reevaluate(thread_t thread __unused)
{
	return;
}

sfi_class_id_t
sfi_thread_classify(thread_t thread)
{
	task_t task = get_threadtask(thread);
	boolean_t is_kernel_thread = (task == kernel_task);

	if (is_kernel_thread) {
		return SFI_CLASS_KERNEL;
	}

	return SFI_CLASS_OPTED_OUT;
}

#endif /* !CONFIG_SCHED_SFI */