xref: /xnu-12377.81.4/osfmk/kern/sched_rt.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_SCHED_RT_H_
30 #define _KERN_SCHED_RT_H_
31 
32 #include <kern/kern_types.h>
33 #include <kern/sched_common.h>
34 #include <kern/processor.h>
35 #include <kern/sched_prim.h>
36 
37 __BEGIN_DECLS
38 
39 #pragma mark - Constants and Tunables
40 
/*
 * Global realtime-scheduler tunables, defined in the realtime scheduler
 * implementation. Semantics below are inferred from the names —
 * NOTE(review): confirm each against sched_rt.c before relying on them.
 */
extern unsigned sched_rt_spill_policy;      /* policy selector for spilling RT threads to other psets */
extern unsigned sched_rt_steal_policy;      /* policy selector for stealing RT threads from other psets */
extern uint32_t rt_deadline_epsilon;        /* tolerance used when comparing RT deadlines (see rt_deadline_add) */
extern uint32_t rt_constraint_threshold;    /* presumably a constraint cutoff in abstime units — verify */
extern int sched_rt_runq_strict_priority;   /* boolean: order the RT runqueue strictly by priority */
extern int sched_allow_rt_smt;              /* boolean: allow RT threads on SMT secondary CPUs */
47 
48 #pragma mark - Initialization
49 
/*
 * Derive realtime-scheduler time constants from the system timebase
 * (presumably run during boot / timebase calibration — verify caller).
 */
void sched_realtime_timebase_init(void);

/* Initialize realtime runqueues for the given pset. */
void sched_rt_init_pset(processor_set_t pset);

/* Called once all psets are initialized. */
void sched_rt_init_completed(void);
57 
58 #if CONFIG_SCHED_EDGE
59 #pragma mark - Realtime Scheduler-CLPC Interface
60 
61 /*
62  * The realtime scheduler uses edges between psets to define policies
63  * regarding migration and steal operations, similar to the edge scheduler.
64  * The weights define an explicit search order for the scheduler to identify
65  * alternative psets when a realtime thread's preferred pset is overloaded.
66  *
67  * The matrix can be directly manipulated with
68  * sched_rt_config_set()/sched_rt_config_get(), but the preferred interface for
69  * updates is to call sched_rt_matrix_set(), which will update cached values
70  * computed from the matrix.
71  */
72 
/*
 * sched_rt_config_set()/sched_rt_config_get()
 *
 * Directly write or read the edge config for the directed edge
 * src_pset -> dst_pset. Per the note above, prefer sched_rt_matrix_set()
 * for updates so cached values derived from the matrix stay coherent.
 */
void              sched_rt_config_set(pset_id_t src_pset, pset_id_t dst_pset, sched_clutch_edge edge_config);
sched_clutch_edge sched_rt_config_get(pset_id_t src_pset, pset_id_t dst_pset);

/*
 * sched_rt_matrix_get()/sched_rt_matrix_set()
 *
 * Selectively retrieve (or update, respectively) multiple edges in the realtime
 * matrix. The realtime spill order is recomputed for every pset with a changed
 * outgoing edge.
 *
 * The matrix provided should be `num_psets * num_psets`, where `num_psets`
 * is equal to `sched_num_psets`. Like the Edge matrix, it is indexed
 * first by source pset (major), then by destination pset (minor).
 *
 * NOTE(review): `edge_requests`/`edge_changes` appear to be per-edge flag
 * arrays (same num_psets * num_psets geometry) selecting which entries are
 * read/written — confirm against the implementation.
 */
void sched_rt_matrix_get(sched_clutch_edge *rt_matrix, bool *edge_requests, uint64_t num_psets);
void sched_rt_matrix_set(sched_clutch_edge *rt_matrix, bool *edge_changes, uint64_t num_psets);
90 
91 #endif /* CONFIG_SCHED_EDGE */
92 
93 #pragma mark - Scheduler Callouts
94 
#if CONFIG_SCHED_SMT
/* SMT-aware callout for rt_choose_processor. */
processor_t sched_rtlocal_choose_processor_smt(processor_set_t starting_pset, processor_t processor, thread_t thread);
#else /* !CONFIG_SCHED_SMT */
/* Non-SMT callout: choose a processor for RT `thread`, searching from `starting_pset`. */
processor_t sched_rt_choose_processor(processor_set_t starting_pset, processor_t processor, thread_t thread);
#endif /* !CONFIG_SCHED_SMT */

#if CONFIG_SCHED_EDGE
/* Try to steal a runnable RT thread on behalf of `stealing_pset` (edge scheduler only). */
thread_t sched_rt_steal_thread(processor_set_t stealing_pset);
#endif /* CONFIG_SCHED_EDGE */
/* Select the next RT thread for `processor` to run. */
thread_t sched_rt_choose_thread(processor_t processor);

/* Drain RT work from a processor being shut down; pulled threads are returned via `threadq`. */
void sched_rt_queue_shutdown(processor_t processor, struct pulled_thread_queue * threadq);

/* Walk the RT runqueues on behalf of the periodic scheduler scan. */
void sched_rt_runq_scan(sched_update_scan_context_t scan_context);

/* Sum of RT runqueue counts across all psets. */
int64_t sched_rt_runq_count_sum(void);
112 
113 #pragma mark - Utilities
114 
/*
 * We are in the process of migrating realtime scheduler code into sched_rt.c
 * to make it unit-testable in isolation.
 *
 * For the time being, these functions are made accessible to code that
 * includes sched_rt.h. They will be made static members of sched_rt.c as
 * soon as practicable.
 */

/*
 * Add offset `e` to deadline `d`.
 * NOTE(review): presumably guards against uint64_t overflow (saturating) —
 * confirm against the implementation in sched_rt.c.
 */
uint64_t rt_deadline_add(uint64_t d, uint64_t e);

/* Map of CPUs in `pset` that are available but not currently running RT threads. */
cpumap_t pset_available_but_not_running_rt_threads_cpumap(processor_set_t pset);

/*
 * Choose a processor in `pset` running the RT thread with the furthest
 * deadline, presumably as a preemption target for an incoming RT thread.
 * Parameter semantics inferred from names — verify:
 *   max_pri                         - priority bound on candidates
 *   minimum_deadline                - candidate's deadline must exceed this
 *   skip_processor                  - processor to exclude from the search
 *   skip_spills                     - ignore processors with pending spills
 *   include_ast_urgent_pending_cpus - also consider CPUs with an urgent AST pending
 */
processor_t
pset_choose_furthest_deadline_processor_for_realtime_thread(
	processor_set_t pset,
	int             max_pri,
	uint64_t        minimum_deadline,
	processor_t     skip_processor,
	bool            skip_spills,
	bool            include_ast_urgent_pending_cpus);

#if CONFIG_SCHED_SMT
/* SMT-aware variant: `consider_secondaries` controls use of SMT secondary CPUs. */
processor_t pset_choose_processor_for_realtime_thread_smt(
	processor_set_t pset,
	processor_t     skip_processor,
	bool            consider_secondaries,
	bool            skip_spills);
#else /* !CONFIG_SCHED_SMT */
/* Choose a processor in `pset` suitable for a realtime thread. */
processor_t
pset_choose_processor_for_realtime_thread(
	processor_set_t pset,
	processor_t     skip_processor,
	bool            skip_spills);
#endif /* !CONFIG_SCHED_SMT */
149 
#if CONFIG_SCHED_EDGE
/* True if `pset` has RT threads that another pset may steal. */
bool     rt_pset_has_stealable_threads(processor_set_t pset);
/* Recompute and publish `pset`'s stealable-thread state. */
void     pset_update_rt_stealable_state(processor_set_t pset);
/* Realtime spill is only supported on platforms with the edge scheduler. */
bool rt_choose_next_processor_for_spill_IPI(processor_set_t starting_pset, processor_t chosen_processor, processor_t *result_processor, sched_ipi_type_t *result_ipi_type);
#else /* !CONFIG_SCHED_EDGE */
/*
 * No stealable-thread state is tracked without the edge scheduler: evaluate
 * the argument exactly once and discard it. The parameter is parenthesized
 * in the expansion — the previous `(void) x;` mis-bound operator-bearing
 * arguments (e.g. a ternary), since a cast binds tighter than binary and
 * conditional operators.
 */
#define pset_update_rt_stealable_state(x) do { (void)(x); } while (0)
#endif /* !CONFIG_SCHED_EDGE */
158 
/* True if `pset` still has RT work warranting a follow-up IPI (inferred from name; verify). */
bool rt_pset_needs_a_followup_IPI(processor_set_t pset);
/* Pick the next processor and IPI type for a follow-up RT IPI, excluding `chosen_processor`. */
void rt_choose_next_processor_for_followup_IPI(processor_set_t pset, processor_t chosen_processor, processor_t *result_processor, sched_ipi_type_t *result_ipi_type);

/*
 * Clear a pending RT spill marker on `processor`.
 * NOTE(review): return presumably indicates whether a spill was pending, and
 * `reason` a debug/trace code — confirm in sched_rt.c.
 */
bool rt_clear_pending_spill(processor_t processor, int reason);
163 
164 #pragma mark - Realtime Runqueues
165 
166 #if DEBUG || SCHED_TEST_HARNESS
167 void check_rt_runq_consistency(rt_queue_t rt_run_queue, thread_t thread);
168 #define CHECK_RT_RUNQ_CONSISTENCY(q, th)    check_rt_runq_consistency(q, th)
169 #else /* !(DEBUG || SCHED_TEST_HARNESS) */
170 #define CHECK_RT_RUNQ_CONSISTENCY(q, th)    do {} while (0)
171 #endif /* !(DEBUG || SCHED_TEST_HARNESS) */
172 
173 int      rt_runq_count(processor_set_t);
174 thread_t rt_runq_dequeue(rt_queue_t rt_run_queue);
175 uint64_t rt_runq_earliest_deadline(processor_set_t);
176 thread_t rt_runq_first(rt_queue_t rt_run_queue);
177 bool     rt_runq_insert(processor_t processor, processor_set_t pset, thread_t thread);
178 bool     rt_runq_is_low_latency(processor_set_t pset);
179 int      rt_runq_priority(processor_set_t pset);
180 void     rt_runq_remove(rt_queue_t rt_run_queue, thread_t thread);
181 
182 __END_DECLS
183 
184 #endif /* _KERN_SCHED_RT_H_ */
185