// Copyright (c) 2024 Apple Inc. All rights reserved.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <mach/mach_time.h>

#ifndef MIN
#define MIN(a, b) (((a)<(b))?(a):(b))
#endif /* MIN */
#ifndef MAX
#define MAX(a, b) (((a)>(b))?(a):(b))
#endif /* MAX */

/* Overrides necessary for userspace code */
#define panic(...) ({ printf("Panicking:\n"); printf(__VA_ARGS__); abort(); })
#define KDBG(...) (void)0
#define KDBG_RELEASE(...) (void)0
/* kalloc_type(type, count, flags) -> zeroed heap memory; flags are ignored */
#define kalloc_type(x, y, z) calloc((size_t)(y), sizeof(x))
/* kfree_type(type, count, ptr) only needs the pointer in userspace */
#define kfree_type(x, y, z) free(z)
#define PE_parse_boot_argn(x, y, z) FALSE
#define kprintf(...) printf(__VA_ARGS__)
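
/*
 * Minimal usage sketch of the mocked allocators (the function name and the
 * element type are illustrative, not part of the harness): a kalloc_type call
 * pairs with a kfree_type call on the same pointer. Note the flags argument
 * is discarded by the macro before compilation, so any token works there.
 */
static void __attribute__((unused))
mock_alloc_example(void)
{
	/* Expands to calloc((size_t)(4), sizeof(uint64_t)) */
	uint64_t *buf = kalloc_type(uint64_t, 4, Z_WAITOK);
	/* Expands to free(buf) */
	kfree_type(uint64_t, 4, buf);
}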

/* Mock locks */
typedef void *lck_ticket_t;
#define decl_lck_mtx_data(class, name) class int name
#define decl_simple_lock_data(class, name) class int name
#define pset_lock(x) (void)x
#define pset_unlock(x) (void)x
#define change_locked_pset(x, y) (y)
#define pset_assert_locked(x) (void)x
#define thread_lock(x) (void)x
#define thread_unlock(x) (void)x
#define simple_lock(...)
#define simple_unlock(...)

/* Processor-related */
#define PERCPU_DECL(type_t, name) type_t name
#include <kern/processor.h>
processor_t processor_array[MAX_SCHED_CPUS];
processor_set_t pset_array[MAX_PSETS];
struct pset_node pset_nodes[MAX_AMP_CLUSTER_TYPES];
#define pset_node0 (pset_nodes[0])

/* Return the first populated pset node whose clusters match the given type. */
pset_node_t
pset_node_for_pset_cluster_type(pset_cluster_type_t pset_cluster_type)
{
	for (unsigned i = 0; i < MAX_AMP_CLUSTER_TYPES; i++) {
		if (bitmap_is_empty(&pset_nodes[i].pset_map, MAX_PSETS)) {
			continue;
		}
		if (pset_nodes[i].pset_cluster_type == pset_cluster_type) {
			return &pset_nodes[i];
		}
	}
	return PSET_NODE_NULL;
}
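
/*
 * Sketch of how a harness might populate pset_nodes before the lookup above
 * can succeed (the function name and setup values are illustrative): a node
 * only matches once at least one pset is marked present in its pset_map.
 */
static void __attribute__((unused))
pset_node_lookup_example(void)
{
	pset_nodes[0].pset_cluster_type = PSET_SMP;
	bitmap_set(&pset_nodes[0].pset_map, 0); /* mark pset 0 present */
	assert(pset_node_for_pset_cluster_type(PSET_SMP) == &pset_nodes[0]);
}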

pset_cluster_type_t
cluster_type_to_pset_cluster_type(cluster_type_t cluster_type)
{
	switch (cluster_type) {
#if __AMP__
	case CLUSTER_TYPE_E:
		return PSET_AMP_E;
	case CLUSTER_TYPE_P:
		return PSET_AMP_P;
#endif /* __AMP__ */
	case CLUSTER_TYPE_SMP:
		return PSET_SMP;
	default:
		panic("Unexpected cluster type %d", cluster_type);
	}
}

/* A CPU is usable only if it is both available and recommended for scheduling. */
cpumap_t
pset_available_cpumap(processor_set_t pset)
{
	return pset->cpu_available_map & pset->recommended_bitmask;
}

/* Expected global(s) */
static task_t kernel_task = NULL;

/* Time conversion to mock the implementation in osfmk/arm/rtclock.c */
static mach_timebase_info_data_t timebase_info;
void
clock_interval_to_absolutetime_interval(uint32_t interval,
    uint32_t scale_factor,
    uint64_t *result)
{
	mach_timebase_info(&timebase_info);
	uint64_t nanosecs = (uint64_t) interval * scale_factor;
	/* abstime * numer / denom == nanoseconds, so apply the inverse ratio */
	*result = nanosecs * timebase_info.denom / timebase_info.numer;
}
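
/*
 * Usage sketch (the helper name is illustrative): converting a 10ms interval
 * into mach absolute time units, the way scheduler quanta are typically
 * sized; 1000000 is the nanoseconds-per-millisecond scale factor.
 */
static uint64_t __attribute__((unused))
example_interval_to_abstime(void)
{
	uint64_t quantum_abs;
	clock_interval_to_absolutetime_interval(10, 1000 * 1000, &quantum_abs);
	return quantum_abs;
}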

/*
 * thread struct from osfmk/kern/thread.h containing only the fields needed by
 * the Clutch runqueue logic, followed by the functions from osfmk/kern/thread.c
 * needed to operate on the __runq field
 */
struct thread {
	int id;
	sched_mode_t sched_mode;
	int16_t sched_pri; /* scheduled (current) priority */
	int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
	queue_chain_t runq_links; /* run queue links */
	struct { processor_t runq; } __runq; /* internally managed run queue assignment; use the thread_*_runq helpers below */
	sched_bucket_t th_sched_bucket;
	processor_t bound_processor; /* bound to a processor? */
	processor_t last_processor; /* processor last dispatched on */
	ast_t reason; /* why we blocked */
	int state;
#define TH_WAIT 0x01 /* queued for waiting */
#define TH_RUN 0x04 /* running or on runq */
#define TH_IDLE 0x80 /* idling processor */
#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
#define TH_SFLAG_BOUND_SOFT 0x20000 /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
	uint64_t thread_id; /* system wide unique thread-id */
	struct {
		uint64_t user_time;
		uint64_t system_time;
	} mock_recount_time;
	uint64_t sched_time_save;
	natural_t sched_usage; /* timesharing cpu usage [sched] */
	natural_t pri_shift; /* usage -> priority from pset */
	natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
	natural_t cpu_delta; /* accumulated cpu_usage delta */
	struct thread_group *thread_group;
	struct priority_queue_entry_stable th_clutch_runq_link;
	struct priority_queue_entry_sched th_clutch_pri_link;
	queue_chain_t th_clutch_timeshare_link;
	uint32_t sched_flags; /* current flag bits */
#define THREAD_BOUND_CLUSTER_NONE (UINT32_MAX)
	uint32_t th_bound_cluster_id;
#if CONFIG_SCHED_EDGE
	bool th_bound_cluster_enqueued;
	bool th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
	bool th_expired_quantum_on_lower_core;
	bool th_expired_quantum_on_higher_core;
#endif /* CONFIG_SCHED_EDGE */

	/* real-time parameters */
	struct { /* see mach/thread_policy.h */
		uint32_t period;
		uint32_t computation;
		uint32_t constraint;
		bool preemptible;
		uint8_t priority_offset; /* base_pri = BASEPRI_RTQUEUES + priority_offset */
		uint64_t deadline;
	} realtime;

	uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
};

void
thread_assert_runq_null(__assert_only thread_t thread)
{
	assert(thread->__runq.runq == PROCESSOR_NULL);
}

void
thread_assert_runq_nonnull(thread_t thread)
{
	assert(thread->__runq.runq != PROCESSOR_NULL);
}

/* Dequeue: the thread must currently be stamped with a runqueue. */
void
thread_clear_runq(thread_t thread)
{
	thread_assert_runq_nonnull(thread);
	thread->__runq.runq = PROCESSOR_NULL;
}

/* Enqueue: the thread must not already be on a runqueue. */
void
thread_set_runq_locked(thread_t thread, processor_t new_runq)
{
	thread_assert_runq_null(thread);
	thread->__runq.runq = new_runq;
}

processor_t
thread_get_runq_locked(thread_t thread)
{
	return thread->__runq.runq;
}

uint64_t
thread_tid(thread_t thread)
{
	return thread != THREAD_NULL ? thread->thread_id : 0;
}

/* Unconditional variant of thread_clear_runq(), with no non-NULL assertion. */
void
thread_clear_runq_locked(thread_t thread)
{
	thread->__runq.runq = PROCESSOR_NULL;
}
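
/*
 * Sketch of the intended stamp/clear protocol (the function name is
 * illustrative; in the real kernel the caller holds the relevant locks):
 * a thread is stamped with its runqueue on enqueue and cleared on dequeue,
 * so the assertions above catch double-enqueues and stray dequeues.
 */
static void __attribute__((unused))
runq_protocol_example(thread_t thread, processor_t processor)
{
	thread_set_runq_locked(thread, processor); /* enqueue */
	assert(thread_get_runq_locked(thread) == processor);
	thread_clear_runq(thread); /* dequeue */
}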

/* Satisfy recount dependency needed by osfmk/kern/sched.h */
#define recount_thread_time_mach(thread) ((thread)->mock_recount_time.user_time + (thread)->mock_recount_time.system_time)

/*
 * thread_group struct from osfmk/kern/thread_group.c containing only the
 * fields needed by the Clutch runqueue logic, followed by the functions
 * needed from osfmk/kern/thread_group.c
 */
struct thread_group {
	uint64_t tg_id;
	struct sched_clutch tg_sched_clutch;
};

sched_clutch_t
sched_clutch_for_thread(thread_t thread)
{
	assert(thread->thread_group != NULL);
	return &(thread->thread_group->tg_sched_clutch);
}

sched_clutch_t
sched_clutch_for_thread_group(struct thread_group *thread_group)
{
	return &(thread_group->tg_sched_clutch);
}

uint64_t
thread_group_get_id(struct thread_group *tg)
{
	return tg->tg_id;
}
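
/*
 * Sanity sketch (the function name is illustrative): for any thread with a
 * thread group, the per-thread and per-group clutch lookups return the same
 * embedded sched_clutch.
 */
static void __attribute__((unused))
clutch_lookup_example(thread_t thread)
{
	assert(sched_clutch_for_thread(thread) ==
	    sched_clutch_for_thread_group(thread->thread_group));
}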

#if CONFIG_SCHED_EDGE

/*
 * A thread is treated as shared-resource heavy if flagged by either the
 * userspace policy or by performance control.
 */
bool
thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type)
{
	return thread->th_shared_rsrc_heavy_user[type] || thread->th_shared_rsrc_heavy_perf_control[type];
}

#endif /* CONFIG_SCHED_EDGE */