xref: /xnu-12377.61.12/osfmk/kern/sched_rt.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_SCHED_RT_H_
30 #define _KERN_SCHED_RT_H_
31 
32 #include <kern/kern_types.h>
33 #include <kern/sched_common.h>
34 #include <kern/processor.h>
35 #include <kern/sched_prim.h>
36 
37 __BEGIN_DECLS
38 
39 #pragma mark - Constants and Tunables
40 
41 #if (DEVELOPMENT || DEBUG || SCHED_TEST_HARNESS)
42 extern unsigned sched_rt_spill_policy;
43 extern unsigned sched_rt_steal_policy;
44 #endif /* (DEVELOPMENT || DEBUG || SCHED_TEST_HARNESS) */
45 
46 extern uint32_t rt_deadline_epsilon;
47 extern uint32_t rt_constraint_threshold;
48 extern int sched_rt_runq_strict_priority;
49 extern int sched_allow_rt_smt;
50 
51 #pragma mark - Initialization
52 
53 void sched_realtime_timebase_init(void);
54 
55 /* Initialize realtime runqueues for the given pset. */
56 void sched_rt_init_pset(processor_set_t pset);
57 
58 /* Called once all psets are initialized. */
59 void sched_rt_init_completed(void);
60 
61 #if CONFIG_SCHED_EDGE
62 #pragma mark - Realtime Scheduler-CLPC Interface
63 
64 /*
65  * The realtime scheduler uses edges between psets to define policies
66  * regarding migration and steal operations, similar to the edge scheduler.
67  * The weights define an explicit search order for the scheduler to identify
68  * alternative psets when a realtime thread's preferred pset is overloaded.
69  *
70  * The matrix can be directly manipulated with
71  * sched_rt_config_set()/sched_rt_config_get(), but the preferred interface for
72  * updates is to call sched_rt_matrix_set(), which will update cached values
73  * computed from the matrix.
74  */
75 
76 void              sched_rt_config_set(pset_id_t src_pset, pset_id_t dst_pset, sched_clutch_edge edge_config);
77 sched_clutch_edge sched_rt_config_get(pset_id_t src_pset, pset_id_t dst_pset);
78 
79 /*
80  * sched_rt_matrix_get()/sched_rt_matrix_set()
81  *
82  * Selectively retrieve (or update, respectively) multiple edges in the realtime
83  * matrix. The realtime spill order is recomputed for every pset with a changed
84  * outgoing edge.
85  *
86  * The matrix provided should be `num_psets * num_psets`, where `num_psets`
87  * is equal to `sched_edge_max_clusters`. Like the Edge matrix, it is indexed
88  * first by source pset (major), then by destination pset (minor).
89  */
90 
91 void sched_rt_matrix_get(sched_clutch_edge *rt_matrix, bool *edge_requests, uint64_t num_psets);
92 void sched_rt_matrix_set(sched_clutch_edge *rt_matrix, bool *edge_changes, uint64_t num_psets);
93 
94 #endif /* CONFIG_SCHED_EDGE */
95 
96 #pragma mark - Scheduler Callouts
97 
98 #if CONFIG_SCHED_SMT
99 /* SMT-aware variant of the realtime choose-processor scheduler callout. */
100 processor_t sched_rtlocal_choose_processor_smt(processor_set_t starting_pset, processor_t processor, thread_t thread);
101 #else /* !CONFIG_SCHED_SMT */
102 processor_t sched_rt_choose_processor(processor_set_t starting_pset, processor_t processor, thread_t thread);
103 #endif /* !CONFIG_SCHED_SMT */
104 
105 #if CONFIG_SCHED_EDGE
106 thread_t sched_rt_steal_thread(processor_set_t stealing_pset);
107 #endif /* CONFIG_SCHED_EDGE */
108 thread_t sched_rt_choose_thread(processor_t processor);
109 
110 void sched_rt_queue_shutdown(processor_t processor);
111 
112 void sched_rt_runq_scan(sched_update_scan_context_t scan_context);
113 
114 int64_t sched_rt_runq_count_sum(void);
115 
116 #pragma mark - Utilities
117 
118 /*
119  * We are in the process of migrating realtime scheduler code into sched_rt.c
120  * to make it unit-testable in isolation.
121  *
122  * For the time being, these functions are made accessible to code that
123  * includes sched_rt.h. They will be made static functions in sched_rt.c
124  * as soon as practicable.
125  */
126 uint64_t rt_deadline_add(uint64_t d, uint64_t e);
127 
128 cpumap_t pset_available_but_not_running_rt_threads_cpumap(processor_set_t pset);
129 
130 processor_t
131 pset_choose_furthest_deadline_processor_for_realtime_thread(
132 	processor_set_t pset,
133 	int             max_pri,
134 	uint64_t        minimum_deadline,
135 	processor_t     skip_processor,
136 	bool            skip_spills,
137 	bool            include_ast_urgent_pending_cpus);
138 
139 #if CONFIG_SCHED_SMT
140 processor_t pset_choose_processor_for_realtime_thread_smt(
141 	processor_set_t pset,
142 	processor_t     skip_processor,
143 	bool            consider_secondaries,
144 	bool            skip_spills);
145 #else /* !CONFIG_SCHED_SMT */
146 processor_t
147 pset_choose_processor_for_realtime_thread(
148 	processor_set_t pset,
149 	processor_t     skip_processor,
150 	bool            skip_spills);
151 #endif /* !CONFIG_SCHED_SMT */
152 
153 #if CONFIG_SCHED_EDGE
154 bool     rt_pset_has_stealable_threads(processor_set_t pset);
155 void     pset_update_rt_stealable_state(processor_set_t pset);
156 /* Realtime spill is only supported on platforms with the edge scheduler. */
157 bool rt_choose_next_processor_for_spill_IPI(processor_set_t starting_pset, processor_t chosen_processor, processor_t *result_processor, sched_ipi_type_t *result_ipi_type);
158 #else /* !CONFIG_SCHED_EDGE */
/*
 * No-op fallback when realtime spill/steal tracking is compiled out
 * (it is only supported with the edge scheduler, see above).
 * The argument is parenthesized so that call sites passing an
 * arbitrary expression (e.g. `p + 1` or `a ? b : c`) expand to
 * `(void)(expr);` rather than mis-binding against the cast, which
 * has higher precedence than `+` and `?:`.
 */
#define pset_update_rt_stealable_state(x) do { (void)(x); } while (0)
160 #endif /* !CONFIG_SCHED_EDGE */
161 
162 bool rt_pset_needs_a_followup_IPI(processor_set_t pset);
163 void rt_choose_next_processor_for_followup_IPI(processor_set_t pset, processor_t chosen_processor, processor_t *result_processor, sched_ipi_type_t *result_ipi_type);
164 
165 bool rt_clear_pending_spill(processor_t processor, int reason);
166 
167 #pragma mark - Realtime Runqueues
168 
/*
 * Runqueue consistency checking.
 *
 * On DEBUG and test-harness builds, CHECK_RT_RUNQ_CONSISTENCY(q, th)
 * calls check_rt_runq_consistency() on the given realtime runqueue and
 * thread (presumably a thread expected to be on, or recently removed
 * from, the queue — confirm against sched_rt.c).
 *
 * NOTE: on release builds the macro expands to an empty statement, so
 * its arguments are NOT evaluated there; callers must not rely on side
 * effects in the argument expressions.
 */
#if DEBUG || SCHED_TEST_HARNESS
void check_rt_runq_consistency(rt_queue_t rt_run_queue, thread_t thread);
#define CHECK_RT_RUNQ_CONSISTENCY(q, th)    check_rt_runq_consistency(q, th)
#else /* !(DEBUG || SCHED_TEST_HARNESS) */
#define CHECK_RT_RUNQ_CONSISTENCY(q, th)    do {} while (0)
#endif /* !(DEBUG || SCHED_TEST_HARNESS) */
175 
176 int      rt_runq_count(processor_set_t);
177 thread_t rt_runq_dequeue(rt_queue_t rt_run_queue);
178 uint64_t rt_runq_earliest_deadline(processor_set_t);
179 thread_t rt_runq_first(rt_queue_t rt_run_queue);
180 bool     rt_runq_insert(processor_t processor, processor_set_t pset, thread_t thread);
181 bool     rt_runq_is_low_latency(processor_set_t pset);
182 int      rt_runq_priority(processor_set_t pset);
183 void     rt_runq_remove(rt_queue_t rt_run_queue, thread_t thread);
184 
185 __END_DECLS
186 
187 #endif /* _KERN_SCHED_RT_H_ */
188