xref: /xnu-8796.121.2/osfmk/kern/processor.h (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 /*
60  *	processor.h:	Processor and processor-related definitions.
61  */
62 
63 #ifndef _KERN_PROCESSOR_H_
64 #define _KERN_PROCESSOR_H_
65 
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <kern/kern_types.h>
69 
70 #include <sys/cdefs.h>
71 
72 #ifdef  MACH_KERNEL_PRIVATE
73 #include <mach/mach_types.h>
74 #include <kern/ast.h>
75 #include <kern/cpu_number.h>
76 #include <kern/smp.h>
77 #include <kern/simple_lock.h>
78 #include <kern/locks.h>
79 #include <kern/percpu.h>
80 #include <kern/queue.h>
81 #include <kern/recount.h>
82 #include <kern/sched.h>
83 #include <kern/sched_urgency.h>
84 #include <kern/timer.h>
85 #include <mach/sfi_class.h>
86 #include <kern/sched_clutch.h>
87 #include <kern/timer_call.h>
88 #include <kern/assert.h>
89 #include <machine/limits.h>
90 #endif
91 
92 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
93 
94 #ifdef  MACH_KERNEL_PRIVATE
95 
96 /*
97  *	Processor state is accessed by locking the scheduling lock
98  *	for the assigned processor set.
99  *
100  *           --- PENDING <------- SHUTDOWN
101  *          /                     ^     ^
102  *        _/                      |      \
103  *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
104  *         \_________________^   ^ ^______/           /
105  *                                \__________________/
106  *
107  *  Most of these state transitions are externally driven as a
108  *  directive (for instance, telling an IDLE processor to start
109  *  coming out of the idle state to run a thread). However, these
110  *  are typically paired with a handshake by the processor itself
111  *  to indicate that it has completed a transition of indeterminate
112  *  length (for example, the DISPATCHING->RUNNING or START->RUNNING
113  *  transitions must occur on the processor itself).
114  *
115  *  The boot processor has some special cases, and skips the START state,
116  *  since it has already bootstrapped and is ready to context switch threads.
117  *
118  *  When a processor is in DISPATCHING or RUNNING state, the current_pri,
119  *  current_thmode, and deadline fields should be set, so that other
120  *  processors can evaluate if it is an appropriate candidate for preemption.
121  */
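
/*
 * An illustrative sketch (not the scheduler's actual policy code): the kind
 * of check another CPU can make, with the pset's scheduling lock held, to
 * decide whether a RUNNING processor is a candidate for preemption.  The
 * candidate_pri threshold is a hypothetical parameter.
 *
 *	if (processor->state == PROCESSOR_RUNNING &&
 *	    processor->current_pri < candidate_pri) {
 *		// lower-priority thread on CPU: worth signalling an AST
 *	}
 */
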
122 #if defined(CONFIG_SCHED_DEFERRED_AST)
123 /*
124  *           --- PENDING <------- SHUTDOWN
125  *          /                     ^     ^
126  *        _/                      |      \
127  *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
128  *         \_________________^   ^ ^______/ ^_____ /  /
129  *                                \__________________/
130  *
131  *  A DISPATCHING processor may be put back into IDLE, if another
132  *  processor determines that the target processor will have nothing to do
133  *  upon reaching the RUNNING state.  This is racy, but if the target
134  *  responds and becomes RUNNING, it will not break the processor state
135  *  machine.
136  *
137  *  This change allows us to cancel an outstanding signal/AST on a processor
138  *  (if such an operation is supported through hardware or software), and
139  *  push the processor back into the IDLE state as a power optimization.
140  */
141 #endif
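
/*
 * An illustrative sketch of quashing a deferred AST (assumes the pset lock is
 * held; "nothing_left_to_run" stands in for whatever emptiness check the
 * caller performs): if the CPU we previously signalled turns out to have no
 * work, drop it from the deferred-AST mask so it can stay in IDLE.
 *
 *	if (bit_test(pset->pending_deferred_AST_cpu_mask, cpu_id) &&
 *	    nothing_left_to_run) {
 *		bit_clear(pset->pending_deferred_AST_cpu_mask, cpu_id);
 *	}
 */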
142 
143 typedef enum {
144 	PROCESSOR_OFF_LINE        = 0,    /* Not available */
145 	PROCESSOR_SHUTDOWN        = 1,    /* Going off-line, but schedulable */
146 	PROCESSOR_START           = 2,    /* Being started */
147 	PROCESSOR_PENDING_OFFLINE = 3,    /* Going off-line, not schedulable */
148 	PROCESSOR_IDLE            = 4,    /* Idle (available) */
149 	PROCESSOR_DISPATCHING     = 5,    /* Dispatching (idle -> active) */
150 	PROCESSOR_RUNNING         = 6,    /* Normal execution */
151 	PROCESSOR_STATE_LEN       = (PROCESSOR_RUNNING + 1)
152 } processor_state_t;
153 
154 typedef enum {
155 	PSET_SMP,
156 #if __AMP__
157 	PSET_AMP_E,
158 	PSET_AMP_P,
159 #endif
160 } pset_cluster_type_t;
161 
162 #if __AMP__
163 
164 typedef enum {
165 	SCHED_PERFCTL_POLICY_DEFAULT,           /*  static policy: set at boot */
166 	SCHED_PERFCTL_POLICY_FOLLOW_GROUP,      /* dynamic policy: perfctl_class follows thread group across amp clusters */
167 	SCHED_PERFCTL_POLICY_RESTRICT_E,        /* dynamic policy: limits perfctl_class to amp e cluster */
168 } sched_perfctl_class_policy_t;
169 
170 extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_util;
171 extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_bg;
172 
173 #endif /* __AMP__ */
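
/*
 * An illustrative sketch: these policy variables are read with relaxed atomic
 * loads; the surrounding decision (whether a utility-band thread may follow
 * its thread group onto a P-cluster) is hypothetical.
 *
 *	if (os_atomic_load(&sched_perfctl_policy_util, relaxed) ==
 *	    SCHED_PERFCTL_POLICY_FOLLOW_GROUP) {
 *		// perfctl class follows the thread group across AMP clusters
 *	}
 */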
174 
175 typedef bitmap_t cpumap_t;
176 
177 #if __arm64__
178 
179 /*
180  * pset_execution_time_t
181  *
182  * The pset_execution_time_t type is used to maintain the average
183  * execution time of threads on a pset. Since the avg. execution time is
184  * updated from contexts where the pset lock is not held, it uses a
185  * double-wide RMW loop to update these values atomically.
186  */
187 typedef union {
188 	struct {
189 		uint64_t        pset_avg_thread_execution_time;
190 		uint64_t        pset_execution_time_last_update;
191 	};
192 	unsigned __int128       pset_execution_time_packed;
193 } pset_execution_time_t;
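
/*
 * An illustrative sketch of the double-wide update (assumes the
 * os_atomic_rmw_loop() macro; "stats", "sample", "curtime" and the 3:1
 * weighting are placeholders, not the scheduler's actual averaging):
 *
 *	pset_execution_time_t old_times, new_times;
 *	os_atomic_rmw_loop(&stats->pset_execution_time_packed,
 *	    old_times.pset_execution_time_packed,
 *	    new_times.pset_execution_time_packed, relaxed, {
 *		new_times.pset_avg_thread_execution_time =
 *		    (3 * old_times.pset_avg_thread_execution_time + sample) / 4;
 *		new_times.pset_execution_time_last_update = curtime;
 *	});
 */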
194 
195 #endif /* __arm64__ */
196 
197 struct processor_set {
198 	int                     pset_id;
199 	int                     online_processor_count;
200 	int                     cpu_set_low, cpu_set_hi;
201 	int                     cpu_set_count;
202 	int                     last_chosen;
203 
204 	uint64_t                load_average;
205 	uint64_t                pset_load_average[TH_BUCKET_SCHED_MAX];
206 	uint64_t                pset_load_last_update;
207 	cpumap_t                cpu_bitmask;
208 	cpumap_t                recommended_bitmask;
209 	cpumap_t                cpu_state_map[PROCESSOR_STATE_LEN];
210 	cpumap_t                primary_map;
211 	cpumap_t                realtime_map;
212 	cpumap_t                cpu_available_map;
213 
214 #define SCHED_PSET_TLOCK (1)
215 #if     defined(SCHED_PSET_TLOCK)
216 /* TODO: reorder struct for temporal cache locality */
217 	__attribute__((aligned(128))) lck_ticket_t      sched_lock;
218 #else /* SCHED_PSET_TLOCK*/
219 	__attribute__((aligned(128))) lck_spin_t        sched_lock;     /* lock for above */
220 #endif /* SCHED_PSET_TLOCK*/
221 
222 #if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
223 	struct run_queue        pset_runq;      /* runq for this processor set */
224 #endif
225 	struct rt_queue         rt_runq;        /* realtime runq for this processor set */
226 	uint64_t                stealable_rt_threads_earliest_deadline; /* if this pset has stealable RT threads, the earliest deadline; else UINT64_MAX */
227 #if CONFIG_SCHED_CLUTCH
228 	struct sched_clutch_root pset_clutch_root; /* clutch hierarchy root */
229 #endif /* CONFIG_SCHED_CLUTCH */
230 
231 #if defined(CONFIG_SCHED_TRADITIONAL)
232 	int                     pset_runq_bound_count;
233 	/* # of threads in runq bound to any processor in pset */
234 #endif
235 
236 	/* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
237 	cpumap_t                pending_AST_URGENT_cpu_mask;
238 	cpumap_t                pending_AST_PREEMPT_cpu_mask;
239 #if defined(CONFIG_SCHED_DEFERRED_AST)
240 	/*
241 	 * A separate mask, for ASTs that we may be able to cancel.  This is dependent on
242 	 * some level of support for requesting an AST on a processor, and then quashing
243 	 * that request later.
244 	 *
245 	 * The purpose of this field (and the associated codepaths) is to infer when we
246 	 * no longer need a processor that is DISPATCHING to come up, and to prevent it
247 	 * from coming out of IDLE if possible.  This should serve to decrease the number
248 	 * of spurious ASTs in the system, and let processors spend longer periods in
249 	 * IDLE.
250 	 */
251 	cpumap_t                pending_deferred_AST_cpu_mask;
252 #endif
253 	cpumap_t                pending_spill_cpu_mask;
254 	cpumap_t                rt_pending_spill_cpu_mask;
255 
256 	struct ipc_port *       pset_self;              /* port for operations */
257 	struct ipc_port *       pset_name_self; /* port for information */
258 
259 	processor_set_t         pset_list;              /* chain of associated psets */
260 	pset_node_t             node;
261 	uint32_t                pset_cluster_id;
262 
263 	/*
264 	 * Currently the scheduler uses a mix of pset_cluster_type_t & cluster_type_t
265 	 * for recommendations etc. It might be useful to unify these as a single type.
266 	 */
267 	pset_cluster_type_t     pset_cluster_type;
268 	cluster_type_t          pset_type;
269 
270 #if CONFIG_SCHED_EDGE
271 	cpumap_t                cpu_running_foreign;
272 	cpumap_t                cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_COUNT];
273 	sched_bucket_t          cpu_running_buckets[MAX_CPUS];
274 
275 	bitmap_t                foreign_psets[BITMAP_LEN(MAX_PSETS)];
276 	bitmap_t                native_psets[BITMAP_LEN(MAX_PSETS)];
277 	bitmap_t                local_psets[BITMAP_LEN(MAX_PSETS)];
278 	bitmap_t                remote_psets[BITMAP_LEN(MAX_PSETS)];
279 	sched_clutch_edge       sched_edges[MAX_PSETS];
280 	pset_execution_time_t   pset_execution_time[TH_BUCKET_SCHED_MAX];
281 	uint64_t                pset_cluster_shared_rsrc_load[CLUSTER_SHARED_RSRC_TYPE_COUNT];
282 #endif /* CONFIG_SCHED_EDGE */
283 	bool                    is_SMT;                 /* pset contains SMT processors */
284 };
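
/*
 * An illustrative sketch (pset lock held): walking every CPU in a pset via
 * cpu_bitmask, e.g. to inspect per-CPU scheduling state.  lsb_first()/
 * lsb_next() are the bit-iteration helpers from <kern/bits.h>;
 * processor_array is declared later in this header.
 *
 *	for (int cpuid = lsb_first(pset->cpu_bitmask); cpuid >= 0;
 *	    cpuid = lsb_next(pset->cpu_bitmask, cpuid)) {
 *		processor_t processor = processor_array[cpuid];
 *		// examine processor->state, processor->current_pri, ...
 *	}
 */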
285 
286 extern struct processor_set     pset0;
287 
288 typedef bitmap_t pset_map_t;
289 
290 struct pset_node {
291 	processor_set_t         psets;                  /* list of associated psets */
292 
293 	pset_node_t             nodes;                  /* list of associated subnodes */
294 	pset_node_t             node_list;              /* chain of associated nodes */
295 
296 	pset_node_t             parent;
297 
298 	pset_cluster_type_t     pset_cluster_type;      /* Same as the type of all psets in this node */
299 
300 	pset_map_t              pset_map;               /* map of associated psets */
301 	_Atomic pset_map_t      pset_idle_map;          /* psets with at least one IDLE CPU */
302 	_Atomic pset_map_t      pset_idle_primary_map;  /* psets with at least one IDLE primary CPU */
303 	_Atomic pset_map_t      pset_non_rt_map;        /* psets with at least one available CPU not running a realtime thread */
304 	_Atomic pset_map_t      pset_non_rt_primary_map;/* psets with at least one available primary CPU not running a realtime thread */
305 };
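
/*
 * An illustrative sketch: picking some pset in a node that currently has an
 * IDLE CPU, using the node's idle map (a negative pset_id means no pset in
 * the node qualifies).  pset_array is declared later in this header.
 *
 *	pset_map_t idle_map = os_atomic_load(&node->pset_idle_map, relaxed);
 *	int pset_id = lsb_first(idle_map);
 *	if (pset_id >= 0) {
 *		processor_set_t target = pset_array[pset_id];
 *		// ... consider placing or stealing work on target ...
 *	}
 */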
306 
307 extern struct pset_node pset_node0;
308 #if __AMP__
309 extern struct pset_node pset_node1;
310 extern pset_node_t ecore_node;
311 extern pset_node_t pcore_node;
312 #endif
313 
314 extern queue_head_t tasks, threads, corpse_tasks;
315 extern int tasks_count, terminated_tasks_count, threads_count, terminated_threads_count;
316 decl_lck_mtx_data(extern, tasks_threads_lock);
317 decl_lck_mtx_data(extern, tasks_corpse_lock);
318 
319 /*
320  * The terminated tasks queue should only be inspected elsewhere by stackshot.
321  */
322 extern queue_head_t terminated_tasks;
323 
324 extern queue_head_t terminated_threads;
325 
326 struct processor {
327 	processor_state_t       state;                  /* See above */
328 	bool                    is_SMT;
329 	bool                    is_recommended;
330 	bool                    current_is_NO_SMT;      /* cached TH_SFLAG_NO_SMT of current thread */
331 	bool                    current_is_bound;       /* current thread is bound to this processor */
332 	bool                    current_is_eagerpreempt;/* current thread is TH_SFLAG_EAGERPREEMPT */
333 	struct thread          *active_thread;          /* thread running on processor */
334 	struct thread          *idle_thread;            /* this processor's idle thread. */
335 	struct thread          *startup_thread;
336 
337 	processor_set_t         processor_set;  /* assigned set */
338 
339 	/*
340 	 * XXX All current_* fields should be grouped together, as they're
341 	 * updated at the same time.
342 	 */
343 	int                     current_pri;            /* priority of current thread */
344 	sfi_class_id_t          current_sfi_class;      /* SFI class of current thread */
345 	perfcontrol_class_t     current_perfctl_class;  /* Perfcontrol class for current thread */
346 	/*
347 	 * The cluster type recommended for the current thread.
348 	 */
349 	pset_cluster_type_t     current_recommended_pset_type;
350 	thread_urgency_t        current_urgency;        /* cached urgency of current thread */
351 
352 #if CONFIG_SCHED_TRADITIONAL
353 	int                     runq_bound_count;       /* # of threads bound to this processor */
354 #endif /* CONFIG_SCHED_TRADITIONAL */
355 
356 #if CONFIG_THREAD_GROUPS
357 	struct thread_group    *current_thread_group;   /* thread_group of current thread */
358 #endif
359 	int                     starting_pri;           /* priority of current thread as it was when scheduled */
360 	int                     cpu_id;                 /* platform numeric id */
361 
362 	uint64_t                quantum_end;            /* time when current quantum ends */
363 	uint64_t                last_dispatch;          /* time of last dispatch */
364 
365 #if KPERF
366 	uint64_t                kperf_last_sample_time; /* time of last kperf sample */
367 #endif /* KPERF */
368 
369 	uint64_t                deadline;               /* for next realtime thread */
370 	bool                    first_timeslice;        /* has the quantum expired since context switch */
371 
372 	bool                    processor_offlined;     /* has the processor been explicitly processor_offline'ed */
373 	bool                    must_idle;              /* Needs to be forced idle as next selected thread is allowed on this processor */
374 
375 	bool                    running_timers_active;  /* whether the running timers should fire */
376 	struct timer_call       running_timers[RUNNING_TIMER_MAX];
377 
378 #if CONFIG_SCHED_TRADITIONAL || CONFIG_SCHED_MULTIQ
379 	struct run_queue        runq;                   /* runq for this processor */
380 #endif /* CONFIG_SCHED_TRADITIONAL || CONFIG_SCHED_MULTIQ */
381 
382 #if CONFIG_SCHED_GRRR
383 	struct grrr_run_queue   grrr_runq;              /* Group Ratio Round-Robin runq */
384 #endif /* CONFIG_SCHED_GRRR */
385 
386 	struct recount_processor pr_recount;
387 
388 	/*
389 	 * Pointer to primary processor for secondary SMT processors, or a
390 	 * pointer to ourselves for primaries or non-SMT.
391 	 */
392 	processor_t             processor_primary;
393 	processor_t             processor_secondary;
394 	struct ipc_port        *processor_self;         /* port for operations */
395 
396 	processor_t             processor_list;         /* all existing processors */
397 
398 	uint64_t                timer_call_ttd;         /* current timer call time-to-deadline */
399 	decl_simple_lock_data(, start_state_lock);
400 	processor_reason_t      last_startup_reason;
401 	processor_reason_t      last_shutdown_reason;
402 	processor_reason_t      last_recommend_reason;
403 	processor_reason_t      last_derecommend_reason;
404 	bool                    shutdown_temporary;     /* Shutdown should be transparent to user - don't update CPU counts */
405 	bool                    shutdown_locked;        /* Processor may not be shutdown (or started up) except by SYSTEM */
406 };
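
/*
 * An illustrative sketch: distinguishing a secondary SMT processor from a
 * primary (or non-SMT) one; processor_primary points back at the processor
 * itself for primaries.
 *
 *	if (processor->processor_primary != processor) {
 *		// secondary SMT CPU; its primary is processor->processor_primary
 *	}
 */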
407 
408 extern processor_t processor_list;
409 decl_simple_lock_data(extern, processor_list_lock);
410 
411 /*
412  * Maximum number of CPUs supported by the scheduler.  The bits.h bitmap
413  * macros would need to be used to support more than 64 CPUs.
414  */
415 #define MAX_SCHED_CPUS          64
416 extern processor_t     __single processor_array[MAX_SCHED_CPUS];    /* array indexed by cpuid */
417 extern processor_set_t __single pset_array[MAX_PSETS];           /* array indexed by pset_id */
418 
419 extern uint32_t                 processor_avail_count;
420 extern uint32_t                 processor_avail_count_user;
421 extern uint32_t                 primary_processor_avail_count;
422 extern uint32_t                 primary_processor_avail_count_user;
423 
424 #define master_processor PERCPU_GET_MASTER(processor)
425 PERCPU_DECL(struct processor, processor);
426 
427 extern processor_t      current_processor(void);
428 
429 /* Lock macros, always acquired and released with interrupts disabled (splsched()) */
430 
431 extern lck_grp_t pset_lck_grp;
432 
433 #if defined(SCHED_PSET_TLOCK)
434 #define pset_lock_init(p)               lck_ticket_init(&(p)->sched_lock, &pset_lck_grp)
435 #define pset_lock(p)                    lck_ticket_lock(&(p)->sched_lock, &pset_lck_grp)
436 #define pset_unlock(p)                  lck_ticket_unlock(&(p)->sched_lock)
437 #define pset_assert_locked(p)           lck_ticket_assert_owned(&(p)->sched_lock)
438 #else /* SCHED_PSET_TLOCK*/
439 #define pset_lock_init(p)               lck_spin_init(&(p)->sched_lock, &pset_lck_grp, NULL)
440 #define pset_lock(p)                    lck_spin_lock_grp(&(p)->sched_lock, &pset_lck_grp)
441 #define pset_unlock(p)                  lck_spin_unlock(&(p)->sched_lock)
442 #define pset_assert_locked(p)           LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED)
443 #endif /*!SCHED_PSET_TLOCK*/
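
/*
 * An illustrative sketch of the locking discipline described above, assuming
 * the usual splsched()/splx() interrupt-disable primitives:
 *
 *	spl_t s = splsched();
 *	pset_lock(pset);
 *	// ... examine or update pset scheduling state ...
 *	pset_unlock(pset);
 *	splx(s);
 */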
444 
445 extern lck_spin_t       pset_node_lock;
446 
447 extern void             processor_bootstrap(void);
448 
449 extern void             processor_init(
450 	processor_t             processor,
451 	int                     cpu_id,
452 	processor_set_t         processor_set);
453 
454 extern void             processor_set_primary(
455 	processor_t             processor,
456 	processor_t             primary);
457 
458 extern kern_return_t    processor_shutdown(
459 	processor_t             processor,
460 	processor_reason_t      reason,
461 	uint32_t                flags);
462 
463 extern void             processor_wait_for_start(
464 	processor_t             processor);
465 
466 extern kern_return_t    processor_start_from_user(
467 	processor_t             processor);
468 extern kern_return_t    processor_exit_from_user(
469 	processor_t             processor);
470 
471 extern kern_return_t    processor_start_reason(
472 	processor_t             processor,
473 	processor_reason_t      reason,
474 	uint32_t                flags);
475 extern kern_return_t    processor_exit_reason(
476 	processor_t             processor,
477 	processor_reason_t      reason,
478 	uint32_t                flags);
479 
480 
481 extern kern_return_t    sched_processor_enable(
482 	processor_t             processor,
483 	boolean_t               enable);
484 
485 extern void             processor_queue_shutdown(
486 	processor_t             processor);
490 
491 extern processor_set_t  processor_pset(
492 	processor_t             processor);
493 
494 extern pset_node_t      pset_node_root(void);
495 
496 extern processor_set_t  pset_create(
497 	pset_node_t             node,
498 	pset_cluster_type_t     pset_type,
499 	uint32_t                pset_cluster_id,
500 	int                     pset_id);
501 
502 extern void             pset_init(
503 	processor_set_t         pset,
504 	pset_node_t             node);
505 
506 extern processor_set_t  pset_find(
507 	uint32_t                cluster_id,
508 	processor_set_t         default_pset);
509 
510 extern kern_return_t    processor_info_count(
511 	processor_flavor_t      flavor,
512 	mach_msg_type_number_t  *count);
513 
514 extern void processor_cpu_load_info(
515 	processor_t processor,
516 	natural_t ticks[static CPU_STATE_MAX]);
517 
518 extern void             machine_run_count(
519 	uint32_t                count);
520 
521 extern processor_t      machine_choose_processor(
522 	processor_set_t         pset,
523 	processor_t             processor);
524 
525 inline static processor_set_t
526 next_pset(processor_set_t pset)
527 {
528 	pset_map_t map = pset->node->pset_map;
529 
530 	int pset_id = lsb_next(map, pset->pset_id);
531 	if (pset_id == -1) {
532 		pset_id = lsb_first(map);
533 	}
534 
535 	return pset_array[pset_id];
536 }
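
/*
 * An illustrative sketch: visiting every pset in a node exactly once by
 * starting from a given pset and stopping when next_pset() wraps back
 * around to it.
 *
 *	processor_set_t cur_pset = starting_pset;
 *	do {
 *		// ... consider cur_pset ...
 *		cur_pset = next_pset(cur_pset);
 *	} while (cur_pset != starting_pset);
 */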
537 
538 #define PSET_THING_TASK         0
539 #define PSET_THING_THREAD       1
540 
541 extern pset_cluster_type_t recommended_pset_type(
542 	thread_t                thread);
543 
544 extern void             processor_state_update_idle(
545 	processor_t             processor);
546 
547 extern void             processor_state_update_from_thread(
548 	processor_t             processor,
549 	thread_t                thread,
550 	boolean_t               pset_lock_held);
551 
552 extern void             processor_state_update_explicit(
553 	processor_t             processor,
554 	int                     pri,
555 	sfi_class_id_t          sfi_class,
556 	pset_cluster_type_t     pset_type,
557 	perfcontrol_class_t     perfctl_class,
558 	thread_urgency_t        urgency,
559 	sched_bucket_t          bucket);
560 
561 #define PSET_LOAD_NUMERATOR_SHIFT   16
562 #define PSET_LOAD_FRACTIONAL_SHIFT   4
563 
564 #if CONFIG_SCHED_EDGE
565 
566 extern cluster_type_t pset_type_for_id(uint32_t cluster_id);
567 extern uint64_t sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type);
568 
569 /*
570  * The Edge scheduler uses average scheduling latency as the metric for making
571  * thread migration decisions. One component of avg scheduling latency is the load
572  * average on the cluster.
573  *
574  * Load Average Fixed Point Arithmetic
575  *
576  * The load average is maintained as a 24.8 fixed point arithmetic value for precision.
577  * When multiplied by the average execution time, it needs to be rounded up (based on
578  * the most significant bit of the fractional part) for better accuracy. After rounding
579  * up, the whole number part of the value is used as the actual load value for
580  * migrate/steal decisions.
581  */
582 #define SCHED_PSET_LOAD_EWMA_FRACTION_BITS 8
583 #define SCHED_PSET_LOAD_EWMA_ROUND_BIT     (1 << (SCHED_PSET_LOAD_EWMA_FRACTION_BITS - 1))
584 #define SCHED_PSET_LOAD_EWMA_FRACTION_MASK ((1 << SCHED_PSET_LOAD_EWMA_FRACTION_BITS) - 1)
585 
586 inline static int
587 sched_get_pset_load_average(processor_set_t pset, sched_bucket_t sched_bucket)
588 {
589 	uint64_t load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
590 	return (int)(((load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS) *
591 	       pset->pset_execution_time[sched_bucket].pset_avg_thread_execution_time);
592 }
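
/*
 * Worked example of the rounding above (values are arbitrary): with 8
 * fraction bits, a load average of 0x2C0 represents 2.75; adding the round
 * bit (0x80) gives 0x340, the shift yields 3, and that whole-number load is
 * then scaled by pset_avg_thread_execution_time.
 */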
593 
594 #else /* CONFIG_SCHED_EDGE */
595 inline static int
596 sched_get_pset_load_average(processor_set_t pset, __unused sched_bucket_t sched_bucket)
597 {
598 	return (int)pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
599 }
600 #endif /* CONFIG_SCHED_EDGE */
601 
602 extern void sched_update_pset_load_average(processor_set_t pset, uint64_t curtime);
603 extern void sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t delta, uint64_t curtime, sched_bucket_t sched_bucket);
604 
605 inline static void
606 pset_update_processor_state(processor_set_t pset, processor_t processor, uint new_state)
607 {
608 	pset_assert_locked(pset);
609 
610 	uint old_state = processor->state;
611 	uint cpuid = (uint)processor->cpu_id;
612 
613 	assert(processor->processor_set == pset);
614 	assert(bit_test(pset->cpu_bitmask, cpuid));
615 
616 	assert(old_state < PROCESSOR_STATE_LEN);
617 	assert(new_state < PROCESSOR_STATE_LEN);
618 
619 	processor->state = new_state;
620 
621 	bit_clear(pset->cpu_state_map[old_state], cpuid);
622 	bit_set(pset->cpu_state_map[new_state], cpuid);
623 
624 	if (bit_test(pset->cpu_available_map, cpuid) && (new_state < PROCESSOR_IDLE)) {
625 		/* No longer available for scheduling */
626 		bit_clear(pset->cpu_available_map, cpuid);
627 	} else if (!bit_test(pset->cpu_available_map, cpuid) && (new_state >= PROCESSOR_IDLE)) {
628 		/* Newly available for scheduling */
629 		bit_set(pset->cpu_available_map, cpuid);
630 	}
631 
632 	if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) {
633 		sched_update_pset_load_average(pset, 0);
634 		if (new_state == PROCESSOR_RUNNING) {
635 			assert(processor == current_processor());
636 		}
637 	}
638 	if ((old_state == PROCESSOR_IDLE) || (new_state == PROCESSOR_IDLE)) {
639 		if (new_state == PROCESSOR_IDLE) {
640 			bit_clear(pset->realtime_map, cpuid);
641 		}
642 
643 		pset_node_t node = pset->node;
644 
645 		if (bit_count(node->pset_map) == 1) {
646 			/* Node has only a single pset, so skip node pset map updates */
647 			return;
648 		}
649 
650 		if (new_state == PROCESSOR_IDLE) {
651 			if (processor->processor_primary == processor) {
652 				if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
653 					atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
654 				}
655 				if (!bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) {
656 					atomic_bit_set(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed);
657 				}
658 			}
659 			if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
660 				atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
661 			}
662 			if (!bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) {
663 				atomic_bit_set(&node->pset_idle_map, pset->pset_id, memory_order_relaxed);
664 			}
665 		} else {
666 			cpumap_t idle_map = pset->cpu_state_map[PROCESSOR_IDLE];
667 			if (idle_map == 0) {
668 				/* No more IDLE CPUs */
669 				if (bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) {
670 					atomic_bit_clear(&node->pset_idle_map, pset->pset_id, memory_order_relaxed);
671 				}
672 			}
673 			if (processor->processor_primary == processor) {
674 				idle_map &= pset->primary_map;
675 				if (idle_map == 0) {
676 					/* No more IDLE primary CPUs */
677 					if (bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) {
678 						atomic_bit_clear(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed);
679 					}
680 				}
681 			}
682 		}
683 	}
684 }
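
/*
 * An illustrative sketch (interrupts disabled): how a processor that has just
 * finished dispatching might publish its transition to RUNNING.  The pset
 * variable is assumed to be the processor's assigned set.
 *
 *	pset_lock(pset);
 *	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
 *	pset_unlock(pset);
 */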
685 
686 decl_simple_lock_data(extern, sched_available_cores_lock);
687 
688 #endif  /* MACH_KERNEL_PRIVATE */
689 #ifdef KERNEL_PRIVATE
690 
691 extern unsigned int     processor_count;
692 extern processor_t      cpu_to_processor(int cpu);
693 
694 extern kern_return_t    enable_smt_processors(bool enable);
695 
696 /*
697  * Update the scheduler with the set of cores that should be used to dispatch new threads.
698  * Non-recommended cores can still be used to field interrupts or run bound threads.
699  * This should be called with interrupts enabled and no scheduler locks held.
700  */
701 #define ALL_CORES_RECOMMENDED   (~(uint64_t)0)
702 #define ALL_CORES_POWERED       (~(uint64_t)0)
703 
704 extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
705 extern void sched_perfcontrol_update_recommended_cores_reason(uint64_t recommended_cores, processor_reason_t reason, uint32_t flags);
706 extern void sched_perfcontrol_update_powered_cores(uint64_t powered_cores, processor_reason_t reason, uint32_t flags);
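
/*
 * An illustrative sketch of a perfcontrol-style caller: recommend only the
 * first four CPUs for new work, then restore all cores.  The masks are
 * example values.
 *
 *	sched_perfcontrol_update_recommended_cores(0x0000000Fu);
 *	// ... thermal or power condition clears ...
 *	sched_perfcontrol_update_recommended_cores((uint32_t)ALL_CORES_RECOMMENDED);
 */
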
707 extern void sched_override_available_cores_for_sleep(void);
708 extern void sched_restore_available_cores_after_sleep(void);
709 extern bool sched_is_in_sleep(void);
710 extern void sched_mark_processor_online_locked(processor_t processor, processor_reason_t reason);
711 extern kern_return_t sched_mark_processor_offline(processor_t processor, processor_reason_t reason);
712 extern bool processor_should_kprintf(processor_t processor, bool starting);
713 extern void suspend_cluster_powerdown(void);
714 extern void resume_cluster_powerdown(void);
715 extern kern_return_t suspend_cluster_powerdown_from_user(void);
716 extern kern_return_t resume_cluster_powerdown_from_user(void);
717 extern int get_cluster_powerdown_user_suspended(void);
718 
719 #endif /* KERNEL_PRIVATE */
720 
721 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
722 
723 #endif  /* _KERN_PROCESSOR_H_ */
724