1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/machine_cpu.h>
30 #include <arm/cpu_internal.h>
31 #include <arm/cpuid.h>
32 #include <arm/cpuid_internal.h>
33 #include <arm/cpu_data.h>
34 #include <arm/cpu_data_internal.h>
35 #include <arm/misc_protos.h>
36 #include <arm/machdep_call.h>
37 #include <arm/machine_routines.h>
38 #include <arm/rtclock.h>
39 #include <kern/machine.h>
40 #include <kern/thread.h>
41 #include <kern/thread_group.h>
42 #include <kern/policy_internal.h>
43 #include <kern/sched_hygiene.h>
44 #include <kern/startup.h>
45 #include <kern/monotonic.h>
46 #include <machine/config.h>
47 #include <machine/atomic.h>
48 #include <machine/monotonic.h>
49 #include <pexpert/pexpert.h>
50 #include <pexpert/device_tree.h>
51 #include <pexpert/arm64/apple_arm64_cpu.h>
52
53 #include <mach/machine.h>
54 #include <mach/machine/sdt.h>
55
56 #if !HAS_CONTINUOUS_HWCLOCK
57 extern uint64_t mach_absolutetime_asleep;
58 #else
59 extern uint64_t wake_abstime;
60 static uint64_t wake_conttime = UINT64_MAX;
61 #endif
62
63 extern volatile uint32_t debug_enabled;
64 extern _Atomic unsigned int cluster_type_num_active_cpus[MAX_CPU_TYPES];
65 const char *cluster_type_names[MAX_CPU_TYPES] = {
66 [CLUSTER_TYPE_SMP] = "Standard",
67 [CLUSTER_TYPE_P] = "Performance",
68 [CLUSTER_TYPE_E] = "Efficiency",
69 };
70
71 static int max_cpus_initialized = 0;
72 #define MAX_CPUS_SET 0x1
73 #define MAX_CPUS_WAIT 0x2
74
75 LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
76 LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
77 uint32_t lockdown_done = 0;
78 boolean_t is_clock_configured = FALSE;
79
80 static void
81 sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
82 {
83 }
84
85 static void
86 sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
87 {
88 }
89
90 static void
91 sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
92 {
93 }
94
95 static void
96 sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
97 {
98 }
99
100 static void
101 sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
102 {
103 }
104
105 static void
106 sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
107 perfcontrol_work_interval_t work_interval __unused)
108 {
109 }
110
111 static void
112 sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
113 perfcontrol_work_interval_instance_t instance __unused)
114 {
115 }
116
117 static void
118 sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
119 {
120 }
121
122 static void
123 sched_perfcontrol_csw_default(
124 __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
125 __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
126 __unused struct perfcontrol_thread_data *oncore,
127 __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
128 {
129 }
130
131 static void
132 sched_perfcontrol_state_update_default(
133 __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
134 __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
135 __unused void *unused)
136 {
137 }
138
139 static void
140 sched_perfcontrol_thread_group_blocked_default(
141 __unused thread_group_data_t blocked_tg, __unused thread_group_data_t blocking_tg,
142 __unused uint32_t flags, __unused perfcontrol_state_t blocked_thr_state)
143 {
144 }
145
146 static void
147 sched_perfcontrol_thread_group_unblocked_default(
148 __unused thread_group_data_t unblocked_tg, __unused thread_group_data_t unblocking_tg,
149 __unused uint32_t flags, __unused perfcontrol_state_t unblocked_thr_state)
150 {
151 }
152
153 sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
154 sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
155 sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
156 sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
157 sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
158 sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
159 sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
160 sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
161 sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
162 sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
163 sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
164 sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
165 sched_perfcontrol_thread_group_blocked_t sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
166 sched_perfcontrol_thread_group_unblocked_t sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
167 boolean_t sched_perfcontrol_thread_shared_rsrc_flags_enabled = false;
168
169 void
170 sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
171 {
172 assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);
173
174 if (size_of_state > sizeof(struct perfcontrol_state)) {
175 panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
176 }
177
178 if (callbacks) {
179 #if CONFIG_THREAD_GROUPS
180 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_3) {
181 if (callbacks->thread_group_init != NULL) {
182 sched_perfcontrol_thread_group_init = callbacks->thread_group_init;
183 } else {
184 sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
185 }
186 if (callbacks->thread_group_deinit != NULL) {
187 sched_perfcontrol_thread_group_deinit = callbacks->thread_group_deinit;
188 } else {
189 sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
190 }
191 // tell CLPC about existing thread groups
192 thread_group_resync(TRUE);
193 }
194
195 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_6) {
196 if (callbacks->thread_group_flags_update != NULL) {
197 sched_perfcontrol_thread_group_flags_update = callbacks->thread_group_flags_update;
198 } else {
199 sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
200 }
201 }
202
203 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_8) {
204 if (callbacks->thread_group_blocked != NULL) {
205 sched_perfcontrol_thread_group_blocked = callbacks->thread_group_blocked;
206 } else {
207 sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
208 }
209
210 if (callbacks->thread_group_unblocked != NULL) {
211 sched_perfcontrol_thread_group_unblocked = callbacks->thread_group_unblocked;
212 } else {
213 sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
214 }
215 }
216 #endif
217 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_9) {
218 sched_perfcontrol_thread_shared_rsrc_flags_enabled = true;
219 }
220
221 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
222 if (callbacks->work_interval_ctl != NULL) {
223 sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
224 } else {
225 sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
226 }
227 }
228
229 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
230 if (callbacks->csw != NULL) {
231 sched_perfcontrol_csw = callbacks->csw;
232 } else {
233 sched_perfcontrol_csw = sched_perfcontrol_csw_default;
234 }
235
236 if (callbacks->state_update != NULL) {
237 sched_perfcontrol_state_update = callbacks->state_update;
238 } else {
239 sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
240 }
241 }
242
243 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
244 if (callbacks->deadline_passed != NULL) {
245 sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
246 } else {
247 sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
248 }
249 }
250
251 if (callbacks->offcore != NULL) {
252 sched_perfcontrol_offcore = callbacks->offcore;
253 } else {
254 sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
255 }
256
257 if (callbacks->context_switch != NULL) {
258 sched_perfcontrol_switch = callbacks->context_switch;
259 } else {
260 sched_perfcontrol_switch = sched_perfcontrol_switch_default;
261 }
262
263 if (callbacks->oncore != NULL) {
264 sched_perfcontrol_oncore = callbacks->oncore;
265 } else {
266 sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
267 }
268
269 if (callbacks->max_runnable_latency != NULL) {
270 sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
271 } else {
272 sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
273 }
274
275 if (callbacks->work_interval_notify != NULL) {
276 sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
277 } else {
278 sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
279 }
280 } else {
281 /* reset to defaults */
282 #if CONFIG_THREAD_GROUPS
283 thread_group_resync(FALSE);
284 #endif
285 sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
286 sched_perfcontrol_switch = sched_perfcontrol_switch_default;
287 sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
288 sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
289 sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
290 sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
291 sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
292 sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
293 sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
294 sched_perfcontrol_csw = sched_perfcontrol_csw_default;
295 sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
296 sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
297 sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
298 }
299 }
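/*
 * Registration sketch (illustrative only, not compiled): a performance
 * controller such as CLPC fills out a callbacks structure, sets the version
 * it was built against, and registers it together with the size of its
 * per-thread state. The my_* functions and state struct below are
 * hypothetical placeholders; only fields that the registration code above
 * dereferences are shown, and NULL entries keep the default no-op callouts.
 *
 *   static struct sched_perfcontrol_callbacks my_callbacks = {
 *       .version             = SCHED_PERFCONTROL_CALLBACKS_VERSION_8,
 *       .offcore             = my_offcore,
 *       .oncore              = my_oncore,
 *       .context_switch      = my_context_switch,
 *       .csw                 = my_csw,
 *       .state_update        = my_state_update,
 *       .thread_group_init   = my_thread_group_init,
 *       .thread_group_deinit = my_thread_group_deinit,
 *   };
 *
 *   sched_perfcontrol_register_callbacks(&my_callbacks,
 *       sizeof(struct my_perfcontrol_state));
 *
 * Passing callbacks == NULL restores every callout to its default and, when
 * CONFIG_THREAD_GROUPS is set, resyncs thread groups via
 * thread_group_resync(FALSE).
 */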
300
301
302 static void
303 machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
304 thread_t thread,
305 uint64_t same_pri_latency)
306 {
307 bzero(data, sizeof(struct perfcontrol_thread_data));
308 data->perfctl_class = thread_get_perfcontrol_class(thread);
309 data->energy_estimate_nj = 0;
310 data->thread_id = thread->thread_id;
311 #if CONFIG_THREAD_GROUPS
312 struct thread_group *tg = thread_group_get(thread);
313 data->thread_group_id = thread_group_get_id(tg);
314 data->thread_group_data = thread_group_get_machine_data(tg);
315 #endif
316 data->scheduling_latency_at_same_basepri = same_pri_latency;
317 data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
318 }
319
320 static void
321 machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
322 {
323 #if CONFIG_CPU_COUNTERS
324 mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
325 #else /* CONFIG_CPU_COUNTERS */
326 cpu_counters->instructions = 0;
327 cpu_counters->cycles = 0;
328 #endif /* !CONFIG_CPU_COUNTERS */
329 }
330
331 int perfcontrol_callout_stats_enabled = 0;
332 static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
333 static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];
334
335 #if CONFIG_CPU_COUNTERS
336 static inline
337 bool
338 perfcontrol_callout_counters_begin(uint64_t *counters)
339 {
340 if (!perfcontrol_callout_stats_enabled) {
341 return false;
342 }
343 mt_fixed_counts(counters);
344 return true;
345 }
346
347 static inline
348 void
349 perfcontrol_callout_counters_end(uint64_t *start_counters,
350 perfcontrol_callout_type_t type)
351 {
352 uint64_t end_counters[MT_CORE_NFIXED];
353 mt_fixed_counts(end_counters);
354 os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
355 end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
356 os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
357 end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
358 os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
359 }
360 #endif /* CONFIG_CPU_COUNTERS */
361
362 uint64_t
363 perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
364 perfcontrol_callout_stat_t stat)
365 {
366 if (!perfcontrol_callout_stats_enabled) {
367 return 0;
368 }
369 return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
370 os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
371 }
372
373 #if CONFIG_SCHED_EDGE
374
375 /*
376 * The Edge scheduler allows the performance controller to update properties about the
377 * threads as part of the callouts. These properties typically include shared cluster
378 * resource usage. This allows the scheduler to manage specific threads within the
379 * workload more optimally.
380 */
381 static void
382 sched_perfcontrol_thread_flags_update(thread_t thread,
383 struct perfcontrol_thread_data *thread_data,
384 shared_rsrc_policy_agent_t agent)
385 {
386 kern_return_t kr = KERN_SUCCESS;
387 if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
388 if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
389 kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
390 } else {
391 kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
392 }
393 }
394 if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
395 if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
396 kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
397 } else {
398 kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
399 }
400 }
401 /*
402 * The thread_shared_rsrc_policy_* routines only fail if the performance controller is
403 * attempting to double set/clear a policy on the thread.
404 */
405 assert(kr == KERN_SUCCESS);
406 }
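/*
 * Illustrative sketch (not compiled): a hypothetical CLPC-side callout can
 * request the round-robin shared-resource policy for a thread by setting the
 * same bit in both thread_flags and thread_flags_mask of the
 * perfcontrol_thread_data it is handed; leaving the bit set in the mask but
 * clear in thread_flags requests removal of the policy instead.
 *
 *   static void
 *   example_mark_rr(struct perfcontrol_thread_data *offcore)
 *   {
 *       offcore->thread_flags_mask |= PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR;
 *       offcore->thread_flags      |= PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR;
 *   }
 *
 * sched_perfcontrol_thread_flags_update() above then translates those bits
 * into thread_shared_rsrc_policy_set()/thread_shared_rsrc_policy_clear()
 * calls.
 */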
407
408 #endif /* CONFIG_SCHED_EDGE */
409
410 void
411 machine_switch_perfcontrol_context(perfcontrol_event event,
412 uint64_t timestamp,
413 uint32_t flags,
414 uint64_t new_thread_same_pri_latency,
415 thread_t old,
416 thread_t new)
417 {
418
419 if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
420 perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
421 perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
422 sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
423 }
424
425 if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
426 uint32_t cpu_id = (uint32_t)cpu_number();
427 struct perfcontrol_cpu_counters cpu_counters;
428 struct perfcontrol_thread_data offcore, oncore;
429 machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
430 machine_switch_populate_perfcontrol_thread_data(&oncore, new,
431 new_thread_same_pri_latency);
432 machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);
433
434 #if CONFIG_CPU_COUNTERS
435 uint64_t counters[MT_CORE_NFIXED];
436 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
437 #endif /* CONFIG_CPU_COUNTERS */
438 sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
439 &offcore, &oncore, &cpu_counters, NULL);
440 #if CONFIG_CPU_COUNTERS
441 if (ctrs_enabled) {
442 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
443 }
444 #endif /* CONFIG_CPU_COUNTERS */
445
446 recount_add_energy(old, get_threadtask(old),
447 offcore.energy_estimate_nj);
448
449 #if CONFIG_SCHED_EDGE
450 if (sched_perfcontrol_thread_shared_rsrc_flags_enabled) {
451 sched_perfcontrol_thread_flags_update(old, &offcore, SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
452 }
453 #endif /* CONFIG_SCHED_EDGE */
454 }
455 }
456
457 void
458 machine_switch_perfcontrol_state_update(perfcontrol_event event,
459 uint64_t timestamp,
460 uint32_t flags,
461 thread_t thread)
462 {
463
464 if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
465 return;
466 }
467 uint32_t cpu_id = (uint32_t)cpu_number();
468 struct perfcontrol_thread_data data;
469 machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);
470
471 #if CONFIG_CPU_COUNTERS
472 uint64_t counters[MT_CORE_NFIXED];
473 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
474 #endif /* CONFIG_CPU_COUNTERS */
475 sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
476 &data, NULL);
477 #if CONFIG_CPU_COUNTERS
478 if (ctrs_enabled) {
479 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
480 }
481 #endif /* CONFIG_CPU_COUNTERS */
482
483 #if CONFIG_PERVASIVE_ENERGY
484 recount_add_energy(thread, get_threadtask(thread), data.energy_estimate_nj);
485 #endif /* CONFIG_PERVASIVE_ENERGY */
486
487 #if CONFIG_SCHED_EDGE
488 if (sched_perfcontrol_thread_shared_rsrc_flags_enabled && (event == QUANTUM_EXPIRY)) {
489 sched_perfcontrol_thread_flags_update(thread, &data, SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM);
490 } else {
491 assert(data.thread_flags_mask == 0);
492 }
493 #endif /* CONFIG_SCHED_EDGE */
494 }
495
496 void
497 machine_thread_going_on_core(thread_t new_thread,
498 thread_urgency_t urgency,
499 uint64_t sched_latency,
500 uint64_t same_pri_latency,
501 uint64_t timestamp)
502 {
503 if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
504 return;
505 }
506 struct going_on_core on_core;
507 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);
508
509 on_core.thread_id = new_thread->thread_id;
510 on_core.energy_estimate_nj = 0;
511 on_core.qos_class = (uint16_t)proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
512 on_core.urgency = (uint16_t)urgency;
513 on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
514 on_core.is_kernel_thread = get_threadtask(new_thread) == kernel_task;
515 #if CONFIG_THREAD_GROUPS
516 struct thread_group *tg = thread_group_get(new_thread);
517 on_core.thread_group_id = thread_group_get_id(tg);
518 on_core.thread_group_data = thread_group_get_machine_data(tg);
519 #endif
520 on_core.scheduling_latency = sched_latency;
521 on_core.start_time = timestamp;
522 on_core.scheduling_latency_at_same_basepri = same_pri_latency;
523
524 #if CONFIG_CPU_COUNTERS
525 uint64_t counters[MT_CORE_NFIXED];
526 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
527 #endif /* CONFIG_CPU_COUNTERS */
528 sched_perfcontrol_oncore(state, &on_core);
529 #if CONFIG_CPU_COUNTERS
530 if (ctrs_enabled) {
531 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
532 }
533 #endif /* CONFIG_CPU_COUNTERS */
534 }
535
536 void
537 machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
538 uint64_t last_dispatch, __unused boolean_t thread_runnable)
539 {
540 if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
541 return;
542 }
543 struct going_off_core off_core;
544 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);
545
546 off_core.thread_id = old_thread->thread_id;
547 off_core.energy_estimate_nj = 0;
548 off_core.end_time = last_dispatch;
549 #if CONFIG_THREAD_GROUPS
550 struct thread_group *tg = thread_group_get(old_thread);
551 off_core.thread_group_id = thread_group_get_id(tg);
552 off_core.thread_group_data = thread_group_get_machine_data(tg);
553 #endif
554
555 #if CONFIG_CPU_COUNTERS
556 uint64_t counters[MT_CORE_NFIXED];
557 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
558 #endif /* CONFIG_CPU_COUNTERS */
559 sched_perfcontrol_offcore(state, &off_core, thread_terminating);
560 #if CONFIG_CPU_COUNTERS
561 if (ctrs_enabled) {
562 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
563 }
564 #endif /* CONFIG_CPU_COUNTERS */
565 }
566
567 #if CONFIG_THREAD_GROUPS
568 void
569 machine_thread_group_init(struct thread_group *tg)
570 {
571 if (sched_perfcontrol_thread_group_init == sched_perfcontrol_thread_group_default) {
572 return;
573 }
574 struct thread_group_data data;
575 data.thread_group_id = thread_group_get_id(tg);
576 data.thread_group_data = thread_group_get_machine_data(tg);
577 data.thread_group_size = thread_group_machine_data_size();
578 data.thread_group_flags = thread_group_get_flags(tg);
579 sched_perfcontrol_thread_group_init(&data);
580 }
581
582 void
583 machine_thread_group_deinit(struct thread_group *tg)
584 {
585 if (sched_perfcontrol_thread_group_deinit == sched_perfcontrol_thread_group_default) {
586 return;
587 }
588 struct thread_group_data data;
589 data.thread_group_id = thread_group_get_id(tg);
590 data.thread_group_data = thread_group_get_machine_data(tg);
591 data.thread_group_size = thread_group_machine_data_size();
592 data.thread_group_flags = thread_group_get_flags(tg);
593 sched_perfcontrol_thread_group_deinit(&data);
594 }
595
596 void
597 machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags)
598 {
599 if (sched_perfcontrol_thread_group_flags_update == sched_perfcontrol_thread_group_default) {
600 return;
601 }
602 struct thread_group_data data;
603 data.thread_group_id = thread_group_get_id(tg);
604 data.thread_group_data = thread_group_get_machine_data(tg);
605 data.thread_group_size = thread_group_machine_data_size();
606 data.thread_group_flags = flags;
607 sched_perfcontrol_thread_group_flags_update(&data);
608 }
609
610 void
611 machine_thread_group_blocked(struct thread_group *blocked_tg,
612 struct thread_group *blocking_tg,
613 uint32_t flags,
614 thread_t blocked_thread)
615 {
616 if (sched_perfcontrol_thread_group_blocked == sched_perfcontrol_thread_group_blocked_default) {
617 return;
618 }
619
620 spl_t s = splsched();
621
622 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(blocked_thread);
623 struct thread_group_data blocked_data;
624 assert(blocked_tg != NULL);
625
626 blocked_data.thread_group_id = thread_group_get_id(blocked_tg);
627 blocked_data.thread_group_data = thread_group_get_machine_data(blocked_tg);
628 blocked_data.thread_group_size = thread_group_machine_data_size();
629
630 if (blocking_tg == NULL) {
631 /*
632 * For special cases such as the render server, the blocking TG is a
633 * well known TG. Only in that case, the blocking_tg should be NULL.
634 */
635 assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
636 sched_perfcontrol_thread_group_blocked(&blocked_data, NULL, flags, state);
637 } else {
638 struct thread_group_data blocking_data;
639 blocking_data.thread_group_id = thread_group_get_id(blocking_tg);
640 blocking_data.thread_group_data = thread_group_get_machine_data(blocking_tg);
641 blocking_data.thread_group_size = thread_group_machine_data_size();
642 sched_perfcontrol_thread_group_blocked(&blocked_data, &blocking_data, flags, state);
643 }
644 KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_START,
645 thread_tid(blocked_thread), thread_group_get_id(blocked_tg),
646 blocking_tg ? thread_group_get_id(blocking_tg) : THREAD_GROUP_INVALID,
647 flags);
648
649 splx(s);
650 }
651
652 void
653 machine_thread_group_unblocked(struct thread_group *unblocked_tg,
654 struct thread_group *unblocking_tg,
655 uint32_t flags,
656 thread_t unblocked_thread)
657 {
658 if (sched_perfcontrol_thread_group_unblocked == sched_perfcontrol_thread_group_unblocked_default) {
659 return;
660 }
661
662 spl_t s = splsched();
663
664 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(unblocked_thread);
665 struct thread_group_data unblocked_data;
666 assert(unblocked_tg != NULL);
667
668 unblocked_data.thread_group_id = thread_group_get_id(unblocked_tg);
669 unblocked_data.thread_group_data = thread_group_get_machine_data(unblocked_tg);
670 unblocked_data.thread_group_size = thread_group_machine_data_size();
671
672 if (unblocking_tg == NULL) {
673 /*
674 * For special cases such as the render server, the unblocking TG is a
675 * well known TG. Only in that case, the unblocking_tg should be NULL.
676 */
677 assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
678 sched_perfcontrol_thread_group_unblocked(&unblocked_data, NULL, flags, state);
679 } else {
680 struct thread_group_data unblocking_data;
681 unblocking_data.thread_group_id = thread_group_get_id(unblocking_tg);
682 unblocking_data.thread_group_data = thread_group_get_machine_data(unblocking_tg);
683 unblocking_data.thread_group_size = thread_group_machine_data_size();
684 sched_perfcontrol_thread_group_unblocked(&unblocked_data, &unblocking_data, flags, state);
685 }
686 KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_END,
687 thread_tid(unblocked_thread), thread_group_get_id(unblocked_tg),
688 unblocking_tg ? thread_group_get_id(unblocking_tg) : THREAD_GROUP_INVALID,
689 flags);
690
691 splx(s);
692 }
693
694 #endif /* CONFIG_THREAD_GROUPS */
695
696 void
697 machine_max_runnable_latency(uint64_t bg_max_latency,
698 uint64_t default_max_latency,
699 uint64_t realtime_max_latency)
700 {
701 if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
702 return;
703 }
704 struct perfcontrol_max_runnable_latency latencies = {
705 .max_scheduling_latencies = {
706 [THREAD_URGENCY_NONE] = 0,
707 [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
708 [THREAD_URGENCY_NORMAL] = default_max_latency,
709 [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
710 }
711 };
712
713 sched_perfcontrol_max_runnable_latency(&latencies);
714 }
715
716 void
717 machine_work_interval_notify(thread_t thread,
718 struct kern_work_interval_args* kwi_args)
719 {
720 if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
721 return;
722 }
723 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
724 struct perfcontrol_work_interval work_interval = {
725 .thread_id = thread->thread_id,
726 .qos_class = (uint16_t)proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
727 .urgency = kwi_args->urgency,
728 .flags = kwi_args->notify_flags,
729 .work_interval_id = kwi_args->work_interval_id,
730 .start = kwi_args->start,
731 .finish = kwi_args->finish,
732 .deadline = kwi_args->deadline,
733 .next_start = kwi_args->next_start,
734 .create_flags = kwi_args->create_flags,
735 };
736 #if CONFIG_THREAD_GROUPS
737 struct thread_group *tg;
738 tg = thread_group_get(thread);
739 work_interval.thread_group_id = thread_group_get_id(tg);
740 work_interval.thread_group_data = thread_group_get_machine_data(tg);
741 #endif
742 sched_perfcontrol_work_interval_notify(state, &work_interval);
743 }
744
745
746 void
747 machine_perfcontrol_deadline_passed(uint64_t deadline)
748 {
749 if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
750 sched_perfcontrol_deadline_passed(deadline);
751 }
752 }
753
754 #if SCHED_HYGIENE_DEBUG
755
756 __options_decl(int_mask_hygiene_flags_t, uint8_t, {
757 INT_MASK_BASE = 0x00,
758 INT_MASK_FROM_HANDLER = 0x01,
759 INT_MASK_IS_STACKSHOT = 0x02,
760 });
761
762 /*
763 * ml_spin_debug_reset()
764 * Reset the timestamp on a thread that has been unscheduled
765 * to avoid false alarms. Alarm will go off if interrupts are held
766 * disabled for too long, starting from now.
767 *
768 * Call ml_get_timebase() directly to prevent extra overhead on newer
769 * platforms that's enabled in DEVELOPMENT kernel configurations.
770 */
771 void
772 ml_spin_debug_reset(thread_t thread)
773 {
774 if (thread->machine.intmask_timestamp) {
775 thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
776 INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
777 }
778 }
779
780 /*
781 * ml_spin_debug_clear()
782 * Clear the timestamp and cycle/instruction counts on a thread that
783 * has been unscheduled to avoid false alarms
784 */
785 void
786 ml_spin_debug_clear(thread_t thread)
787 {
788 thread->machine.intmask_timestamp = 0;
789 thread->machine.intmask_cycles = 0;
790 thread->machine.intmask_instr = 0;
791 }
792
793 /*
794 * ml_spin_debug_clear_self()
795 * Clear the timestamp on the current thread to prevent
796 * false alarms
797 */
798 void
799 ml_spin_debug_clear_self(void)
800 {
801 ml_spin_debug_clear(current_thread());
802 }
803
804 #ifndef KASAN
805
806 /*
807 * Get a character representing the provided thread's kind of CPU.
808 */
809 #if !CONFIG_CPU_COUNTERS
810 __unused
811 #endif // !CONFIG_CPU_COUNTERS
812 static char
813 __ml_interrupts_disabled_cpu_kind(thread_t thread)
814 {
815 #if __AMP__
816 processor_t processor = thread->last_processor;
817 if (!processor) {
818 return '!';
819 }
820 switch (processor->processor_set->pset_cluster_type) {
821 case PSET_AMP_P:
822 return 'P';
823 case PSET_AMP_E:
824 return 'E';
825 default:
826 return '?';
827 }
828 #else // __AMP__
829 #pragma unused(thread)
830 return '-';
831 #endif // !__AMP__
832 }
833
834 #define EXTRA_INFO_STRING_SIZE 256
835 #define LOW_FREQ_THRESHOLD_MHZ 500
836 #define HIGH_CPI_THRESHOLD 3
837
838 static void
839 __ml_trigger_interrupts_disabled_handle(thread_t thread, uint64_t start, uint64_t now, uint64_t timeout, int_mask_hygiene_flags_t flags)
840 {
841 mach_timebase_info_data_t timebase;
842 clock_timebase_info(&timebase);
843 bool is_int_handler = flags & INT_MASK_FROM_HANDLER;
844 bool is_stackshot = flags & INT_MASK_IS_STACKSHOT;
845
846 const uint64_t time_elapsed = now - start;
847 const uint64_t time_elapsed_ns = (time_elapsed * timebase.numer) / timebase.denom;
848
849 #if __AMP__
850 if (is_stackshot && interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
851 /*
852 * If there are no recommended performance cores, we double the timeout to compensate
853 * for the difference in time it takes Stackshot to run on efficiency cores, and then
854 * recheck if we still exceeded the adjusted timeout.
855 */
856 int cpu;
857 int max_cpu;
858
859 max_cpu = ml_get_max_cpu_number();
860 for (cpu = 0; cpu <= max_cpu; cpu++) {
861 processor_t processor = cpu_to_processor(cpu);
862 if (processor->is_recommended &&
863 processor->processor_set->pset_cluster_type == PSET_AMP_P) {
864 break;
865 }
866 }
867 if (cpu > max_cpu) {
868 if (time_elapsed < timeout * 2) {
869 return;
870 }
871 }
872 }
873 #endif /* __AMP__ */
874
875 uint64_t current_cycles = 0, current_instrs = 0;
876
877 #if CONFIG_CPU_COUNTERS
878 if (static_if(sched_debug_pmc)) {
879 mt_cur_cpu_cycles_instrs_speculative(&current_cycles, &current_instrs);
880 }
881 #endif // CONFIG_CPU_COUNTERS
882
883 const uint64_t cycles_elapsed = current_cycles - thread->machine.intmask_cycles;
884 const uint64_t instrs_elapsed = current_instrs - thread->machine.intmask_instr;
885
886 if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
887 const uint64_t timeout_ns = ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom;
888 char extra_info_string[EXTRA_INFO_STRING_SIZE] = { '\0' };
889 #if CONFIG_CPU_COUNTERS
890 if (static_if(sched_debug_pmc)) {
891 const uint64_t time_elapsed_us = time_elapsed_ns / 1000;
892 const uint64_t average_freq_mhz = cycles_elapsed / time_elapsed_us;
893 const uint64_t average_cpi_whole = cycles_elapsed / instrs_elapsed;
894 const uint64_t average_cpi_fractional = ((cycles_elapsed * 100) / instrs_elapsed) % 100;
895 bool high_cpi = average_cpi_whole >= HIGH_CPI_THRESHOLD;
896 char core_kind = __ml_interrupts_disabled_cpu_kind(thread);
897 bool low_mhz = average_freq_mhz < LOW_FREQ_THRESHOLD_MHZ;
898
899 snprintf(extra_info_string, EXTRA_INFO_STRING_SIZE,
900 ", %sfreq = %llu MHz, %sCPI = %llu.%llu, CPU kind = %c",
901 low_mhz ? "low " : "",
902 average_freq_mhz,
903 high_cpi ? "high " : "",
904 average_cpi_whole,
905 average_cpi_fractional,
906 core_kind);
907 }
908 #endif // CONFIG_CPU_COUNTERS
909
910 if (is_int_handler) {
911 panic("Processing of an interrupt (type = %u, handler address = %p, vector = %p) "
912 "took %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
913 thread->machine.int_type, (void *)thread->machine.int_handler_addr, (void *)thread->machine.int_vector,
914 time_elapsed_ns, start, now, timeout_ns, extra_info_string);
915 } else {
916 panic("%s for %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
917 is_stackshot ? "Stackshot disabled interrupts" : "Interrupts held disabled",
918 time_elapsed_ns, start, now, timeout_ns, extra_info_string);
919 }
920 } else if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_TRACE) {
921 if (is_int_handler) {
922 static const uint32_t interrupt_handled_dbgid =
923 MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_HANDLED_EXPIRED);
924 DTRACE_SCHED3(interrupt_handled_dbgid, uint64_t, time_elapsed,
925 uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
926 KDBG(interrupt_handled_dbgid, time_elapsed,
927 cycles_elapsed, instrs_elapsed);
928 } else {
929 static const uint32_t interrupt_masked_dbgid =
930 MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_MASKED_EXPIRED);
931 DTRACE_SCHED3(interrupt_masked_dbgid, uint64_t, time_elapsed,
932 uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
933 KDBG(interrupt_masked_dbgid, time_elapsed,
934 cycles_elapsed, instrs_elapsed);
935 }
936 }
937 }
938 #endif // !defined(KASAN)
939
940 static inline void
941 __ml_handle_interrupts_disabled_duration(thread_t thread, uint64_t timeout, bool is_int_handler)
942 {
943 if (timeout == 0) {
944 return; // 0 means timeout disabled.
945 }
946 uint64_t start = is_int_handler ? thread->machine.inthandler_timestamp : thread->machine.intmask_timestamp;
947 if (start != 0) {
948 uint64_t now = ml_get_sched_hygiene_timebase();
949
950 if (interrupt_masked_debug_mode &&
951 ((now - start) > timeout * debug_cpu_performance_degradation_factor) &&
952 !thread->machine.inthandler_abandon) {
953 /*
954 * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
955 * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
956 */
957 #ifndef KASAN
958 __ml_trigger_interrupts_disabled_handle(thread, start, now, timeout, is_int_handler);
959 #endif
960 }
961
962 if (is_int_handler) {
963 uint64_t const duration = now - start;
964 /*
965 * No need for an atomic add, the only thread modifying
966 * this is ourselves. Other threads querying will just see
967 * either the old or the new value. (This will also just
968 * resolve to regular loads and stores on relevant
969 * platforms.)
970 */
971 uint64_t const old_duration = os_atomic_load_wide(&thread->machine.int_time_mt, relaxed);
972 os_atomic_store_wide(&thread->machine.int_time_mt, old_duration + duration, relaxed);
973 }
974 }
975 }
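/*
 * Worked example of the timeout check above, using hypothetical numbers: on
 * a 24 MHz timebase (numer = 125, denom = 3, i.e. one tick is 125/3 ns), an
 * interrupt_masked_timeout of 12,000 ticks corresponds to 500 us. With
 * debug_cpu_performance_degradation_factor == 1, a thread that kept
 * interrupts masked for now - start = 20,000 ticks exceeds the timeout, and
 * the panic/trace path reports 20,000 * 125 / 3, roughly 833,333 ns.
 */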
976
977 void
978 ml_handle_interrupts_disabled_duration(thread_t thread)
979 {
980 __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_BASE);
981 }
982
983 void
984 ml_handle_stackshot_interrupt_disabled_duration(thread_t thread)
985 {
986 /* Use MAX() to let the user bump the timeout further if needed */
987 uint64_t stackshot_timeout = os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed);
988 uint64_t normal_timeout = os_atomic_load(&interrupt_masked_timeout, relaxed);
989 uint64_t timeout = MAX(stackshot_timeout, normal_timeout);
990 __ml_handle_interrupts_disabled_duration(thread, timeout, INT_MASK_IS_STACKSHOT);
991 }
992
993 void
994 ml_handle_interrupt_handler_duration(thread_t thread)
995 {
996 __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_FROM_HANDLER);
997 }
998
999 void
1000 ml_irq_debug_start(uintptr_t handler, uintptr_t vector)
1001 {
1002 INTERRUPT_MASKED_DEBUG_START(handler, DBG_INTR_TYPE_OTHER);
1003 current_thread()->machine.int_vector = (uintptr_t)VM_KERNEL_STRIP_PTR(vector);
1004 }
1005
1006 void
1007 ml_irq_debug_end()
1008 {
1009 INTERRUPT_MASKED_DEBUG_END();
1010 }
1011
1012 /*
1013 * Abandon a potential timeout when handling an interrupt. It is important to
1014 * continue to keep track of the interrupt time so the time-stamp can't be
1015 * reset. (Interrupt time is subtracted from preemption time to maintain
1016 * accurate preemption time measurement).
1017 * When `inthandler_abandon` is true, a timeout will be ignored when the
1018 * interrupt handler finishes.
1019 */
1020 void
1021 ml_irq_debug_abandon(void)
1022 {
1023 assert(!ml_get_interrupts_enabled());
1024
1025 thread_t t = current_thread();
1026 if (t->machine.inthandler_timestamp != 0) {
1027 t->machine.inthandler_abandon = true;
1028 }
1029 }
1030 #endif // SCHED_HYGIENE_DEBUG
1031
1032 #if SCHED_HYGIENE_DEBUG
1033 __attribute__((noinline))
1034 static void
1035 ml_interrupt_masked_debug_timestamp(thread_t thread)
1036 {
1037 thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
1038 INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
1039 }
1040 #endif
1041
1042 boolean_t
1043 ml_set_interrupts_enabled_with_debug(boolean_t enable, boolean_t __unused debug)
1044 {
1045 thread_t thread;
1046 uint64_t state;
1047
1048 thread = current_thread();
1049
1050 state = __builtin_arm_rsr("DAIF");
1051
1052 if (__improbable(!(state & DAIF_DEBUGF))) {
1053 panic("%s: debug exceptions enabled in kernel mode", __func__);
1054 }
1055 if (enable && (state & DAIF_STANDARD_DISABLE)) {
1056 assert3u(state & DAIF_STANDARD_DISABLE, ==, DAIF_STANDARD_DISABLE);
1057 assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
1058 #if SCHED_HYGIENE_DEBUG
1059 if (__probable(debug && static_if(sched_debug_interrupt_disable))) {
1060 // Interrupts are currently masked, we will enable them (after finishing this check)
1061 if (stackshot_active()) {
1062 ml_handle_stackshot_interrupt_disabled_duration(thread);
1063 } else {
1064 ml_handle_interrupts_disabled_duration(thread);
1065 }
1066 thread->machine.intmask_timestamp = 0;
1067 thread->machine.intmask_cycles = 0;
1068 thread->machine.intmask_instr = 0;
1069 }
1070 #endif // SCHED_HYGIENE_DEBUG
1071 if (get_preemption_level() == 0) {
1072 while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
1073 #if __ARM_USER_PROTECT__
1074 uintptr_t up = arm_user_protect_begin(thread);
1075 #endif
1076 ast_taken_kernel();
1077 #if __ARM_USER_PROTECT__
1078 arm_user_protect_end(thread, up, FALSE);
1079 #endif
1080 }
1081 }
1082 __builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE);
1083 } else if (!enable && ((state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE)) {
1084 assert3u(state & DAIF_STANDARD_DISABLE, ==, 0);
1085 __builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE);
1086
1087 #if SCHED_HYGIENE_DEBUG
1088 if (__probable(debug && static_if(sched_debug_interrupt_disable))) {
1089 // Interrupts were enabled, we just masked them
1090 ml_interrupt_masked_debug_timestamp(thread);
1091 }
1092 #endif
1093 }
1094 return (state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE;
1095 }
1096
1097 boolean_t
1098 ml_set_interrupts_enabled(boolean_t enable)
1099 {
1100 return ml_set_interrupts_enabled_with_debug(enable, true);
1101 }
1102
1103 boolean_t
1104 ml_early_set_interrupts_enabled(boolean_t enable)
1105 {
1106 return ml_set_interrupts_enabled(enable);
1107 }
1108
1109 /*
1110 * Interrupt enable function exported for AppleCLPC without
1111 * measurements enabled.
1112 *
1113 * Only for AppleCLPC!
1114 */
1115 boolean_t
1116 sched_perfcontrol_ml_set_interrupts_without_measurement(boolean_t enable)
1117 {
1118 return ml_set_interrupts_enabled_with_debug(enable, false);
1119 }
1120
1121 /*
1122 * Routine: ml_at_interrupt_context
1123 * Function: Check if running at interrupt context
1124 */
1125 boolean_t
1126 ml_at_interrupt_context(void)
1127 {
1128 /* Do not use a stack-based check here, as the top-level exception handler
1129 * is free to use some other stack besides the per-CPU interrupt stack.
1130 * Interrupts should always be disabled if we're at interrupt context.
1131 * Check that first, as we may be in a preemptible non-interrupt context, in
1132 * which case we could be migrated to a different CPU between obtaining
1133 * the per-cpu data pointer and loading cpu_int_state. We then might end
1134 * up checking the interrupt state of a different CPU, resulting in a false
1135 * positive. But if interrupts are disabled, we also know we cannot be
1136 * preempted. */
1137 return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
1138 }
1139
1140 /*
1141 * This answers the question
1142 * "after returning from this interrupt handler with the AST_URGENT bit set,
1143 * will I end up in ast_taken_user or ast_taken_kernel?"
1144 *
1145 * If it's called in non-interrupt context (e.g. regular syscall), it should
1146 * return false.
1147 *
1148 * Must be called with interrupts disabled.
1149 */
1150 bool
1151 ml_did_interrupt_userspace(void)
1152 {
1153 assert(ml_get_interrupts_enabled() == false);
1154
1155 struct arm_saved_state *state = getCpuDatap()->cpu_int_state;
1156
1157 return state && PSR64_IS_USER(get_saved_state_cpsr(state));
1158 }
1159
1160
1161 vm_offset_t
1162 ml_stack_remaining(void)
1163 {
1164 uintptr_t local = (uintptr_t) &local;
1165 vm_offset_t intstack_top_ptr;
1166
1167 /* Since this is a stack-based check, we don't need to worry about
1168 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
1169 * then the sp should never be within any CPU's interrupt stack unless
1170 * something has gone horribly wrong. */
1171 intstack_top_ptr = getCpuDatap()->intstack_top;
1172 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
1173 return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
1174 } else {
1175 return local - current_thread()->kernel_stack;
1176 }
1177 }
1178
1179 static boolean_t ml_quiescing = FALSE;
1180
1181 void
1182 ml_set_is_quiescing(boolean_t quiescing)
1183 {
1184 assert(ml_quiescing != quiescing);
1185 ml_quiescing = quiescing;
1186 os_atomic_thread_fence(release);
1187 }
1188
1189 boolean_t
1190 ml_is_quiescing(void)
1191 {
1192 os_atomic_thread_fence(acquire);
1193 return ml_quiescing;
1194 }
1195
1196 uint64_t
1197 ml_get_booter_memory_size(void)
1198 {
1199 #if CONFIG_SPTM
1200 extern uint64_t memSize;
1201 #endif /* CONFIG_SPTM */
1202 uint64_t size;
1203 uint64_t roundsize = 512 * 1024 * 1024ULL;
1204 size = BootArgs->memSizeActual;
1205 if (!size) {
1206 #if CONFIG_SPTM
1207 /*
1208 * SPTM systems cache [memSize] in a CTRR-protected variable rather
1209 * than relying on [BootArgs]. This is to enable the possibility
1210 * for XNU to modify it before machine lockdown, which happens in
1211 * KASAN kernels. If we did not do this, XNU would fault on the first
1212 * attempt to overwrite [BootArgs->memSize].
1213 */
1214 size = memSize;
1215 #else
1216 size = BootArgs->memSize;
1217 #endif /* CONFIG_SPTM */
1218 if (size < (2 * roundsize)) {
1219 roundsize >>= 1;
1220 }
1221 size = (size + roundsize - 1) & ~(roundsize - 1);
1222 }
1223
1224 #if CONFIG_SPTM
1225 size -= memSize;
1226 #else
1227 size -= BootArgs->memSize;
1228 #endif /* CONFIG_SPTM */
1229
1230 return size;
1231 }
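/*
 * Worked example with hypothetical values (non-SPTM path): if the booter
 * reports memSize = 3,897 MB and memSizeActual is 0, roundsize stays at
 * 512 MB (3,897 MB is not below 2 * 512 MB), the size rounds up to 4,096 MB,
 * and the function returns 4,096 - 3,897 = 199 MB of booter-reserved memory.
 */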
1232
1233 uint64_t
1234 ml_get_abstime_offset(void)
1235 {
1236 return rtclock_base_abstime;
1237 }
1238
1239 uint64_t
1240 ml_get_conttime_offset(void)
1241 {
1242 #if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
1243 return hwclock_conttime_offset;
1244 #elif HAS_CONTINUOUS_HWCLOCK
1245 return 0;
1246 #else
1247 return rtclock_base_abstime + mach_absolutetime_asleep;
1248 #endif
1249 }
1250
1251 uint64_t
1252 ml_get_time_since_reset(void)
1253 {
1254 #if HAS_CONTINUOUS_HWCLOCK
1255 if (wake_conttime == UINT64_MAX) {
1256 return UINT64_MAX;
1257 } else {
1258 return mach_continuous_time() - wake_conttime;
1259 }
1260 #else
1261 /* The timebase resets across S2R, so just return the raw value. */
1262 return ml_get_hwclock();
1263 #endif
1264 }
1265
1266 void
1267 ml_set_reset_time(__unused uint64_t wake_time)
1268 {
1269 #if HAS_CONTINUOUS_HWCLOCK
1270 wake_conttime = wake_time;
1271 #endif
1272 }
1273
1274 uint64_t
1275 ml_get_conttime_wake_time(void)
1276 {
1277 #if HAS_CONTINUOUS_HWCLOCK
1278 /*
1279 * For now, we will reconstitute the timebase value from
1280 * cpu_timebase_init and use it as the wake time.
1281 */
1282 return wake_abstime - ml_get_abstime_offset();
1283 #else /* HAS_CONTINUOUS_HWCLOCK */
1284 /* The wake time is simply our continuous time offset. */
1285 return ml_get_conttime_offset();
1286 #endif /* HAS_CONTINUOUS_HWCLOCK */
1287 }
1288
1289 /*
1290 * ml_snoop_thread_is_on_core(thread_t thread)
1291 * Check if the given thread is currently on core. This function does not take
1292 * locks, disable preemption, or otherwise guarantee synchronization. The
1293 * result should be considered advisory.
1294 */
1295 bool
1296 ml_snoop_thread_is_on_core(thread_t thread)
1297 {
1298 unsigned int cur_cpu_num = 0;
1299 const unsigned int max_cpu_id = ml_get_max_cpu_number();
1300
1301 for (cur_cpu_num = 0; cur_cpu_num <= max_cpu_id; cur_cpu_num++) {
1302 if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
1303 if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
1304 return true;
1305 }
1306 }
1307 }
1308
1309 return false;
1310 }
1311
1312 int
1313 ml_early_cpu_max_number(void)
1314 {
1315 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1316 return ml_get_max_cpu_number();
1317 }
1318
1319 void
1320 ml_set_max_cpus(unsigned int max_cpus __unused)
1321 {
1322 lck_mtx_lock(&max_cpus_lock);
1323 if (max_cpus_initialized != MAX_CPUS_SET) {
1324 if (max_cpus_initialized == MAX_CPUS_WAIT) {
1325 thread_wakeup((event_t) &max_cpus_initialized);
1326 }
1327 max_cpus_initialized = MAX_CPUS_SET;
1328 }
1329 lck_mtx_unlock(&max_cpus_lock);
1330 }
1331
1332 unsigned int
1333 ml_wait_max_cpus(void)
1334 {
1335 assert(lockdown_done);
1336 lck_mtx_lock(&max_cpus_lock);
1337 while (max_cpus_initialized != MAX_CPUS_SET) {
1338 max_cpus_initialized = MAX_CPUS_WAIT;
1339 lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
1340 }
1341 lck_mtx_unlock(&max_cpus_lock);
1342 return machine_info.max_cpus;
1343 }
1344
1345 void
1346 ml_cpu_get_info_type(ml_cpu_info_t * ml_cpu_info, cluster_type_t cluster_type)
1347 {
1348 cache_info_t *cpuid_cache_info;
1349
1350 cpuid_cache_info = cache_info_type(cluster_type);
1351 ml_cpu_info->vector_unit = 0;
1352 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
1353 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
1354 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
1355
1356 #if (__ARM_ARCH__ >= 8)
1357 ml_cpu_info->l2_settings = 1;
1358 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
1359 #else
1360 #error Unsupported arch
1361 #endif
1362 ml_cpu_info->l3_settings = 0;
1363 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
1364 }
1365
1366 /*
1367 * Routine: ml_cpu_get_info
1368 * Function: Fill out the ml_cpu_info_t structure with parameters associated
1369 * with the boot cluster.
1370 */
1371 void
1372 ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
1373 {
1374 ml_cpu_get_info_type(ml_cpu_info, ml_get_topology_info()->boot_cpu->cluster_type);
1375 }
1376
1377 unsigned int
1378 ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available)
1379 {
1380 /*
1381 * At present no supported ARM system features SMT, so the "logical"
1382 * parameter doesn't have an impact on the result.
1383 */
1384 if (logical && available) {
1385 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
1386 } else if (logical && !available) {
1387 return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
1388 } else if (!logical && available) {
1389 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
1390 } else {
1391 return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
1392 }
1393 }
1394
1395 void
1396 ml_get_cluster_type_name(cluster_type_t cluster_type, char *name, size_t name_size)
1397 {
1398 strlcpy(name, cluster_type_names[cluster_type], name_size);
1399 }
1400
1401 unsigned int
1402 ml_get_cluster_number_type(cluster_type_t cluster_type)
1403 {
1404 return ml_get_topology_info()->cluster_type_num_clusters[cluster_type];
1405 }
1406
1407 unsigned int
1408 ml_cpu_cache_sharing(unsigned int level, cluster_type_t cluster_type, bool include_all_cpu_types __unused)
1409 {
1410 unsigned int cpu_number = 0, cluster_types = 0;
1411
1412 /*
1413 * Level 0 corresponds to main memory, which is shared across all cores.
1414 */
1415 if (level == 0) {
1416 return ml_get_topology_info()->num_cpus;
1417 }
1418
1419 /*
1420 * At present no supported ARM system features more than 2 levels of caches.
1421 */
1422 if (level > 2) {
1423 return 0;
1424 }
1425
1426 /*
1427 * L1 caches are always per core.
1428 */
1429 if (level == 1) {
1430 return 1;
1431 }
1432
1433 cluster_types = (1 << cluster_type);
1434
1435 /*
1436 * Traverse clusters until we find the one(s) of the desired type(s).
1437 */
1438 for (int i = 0; i < ml_get_topology_info()->num_clusters; i++) {
1439 ml_topology_cluster_t *cluster = &ml_get_topology_info()->clusters[i];
1440 if ((1 << cluster->cluster_type) & cluster_types) {
1441 cpu_number += cluster->num_cpus;
1442 cluster_types &= ~(1 << cluster->cluster_type);
1443 if (!cluster_types) {
1444 break;
1445 }
1446 }
1447 }
1448
1449 return cpu_number;
1450 }
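/*
 * Usage sketch with a hypothetical topology: on an SoC with one 4-CPU
 * Efficiency cluster and two 2-CPU Performance clusters,
 * ml_cpu_cache_sharing(2, CLUSTER_TYPE_E, false) returns 4 (the CPUs sharing
 * one E-cluster L2), ml_cpu_cache_sharing(1, CLUSTER_TYPE_P, false) returns 1
 * (L1 is always per core), and level 0 returns all 8 CPUs.
 */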
1451
1452 unsigned int
1453 ml_get_cpu_types(void)
1454 {
1455 return ml_get_topology_info()->cluster_types;
1456 }
1457
1458 void
1459 machine_conf(void)
1460 {
1461 /*
1462 * This is known to be inaccurate. mem_size should always be capped at 2 GB
1463 */
1464 machine_info.memory_size = (uint32_t)mem_size;
1465
1466 // rdar://problem/58285685: Userland expects _COMM_PAGE_LOGICAL_CPUS to report
1467 // (max_cpu_id+1) rather than a literal *count* of logical CPUs.
1468 unsigned int num_cpus = ml_get_topology_info()->max_cpu_id + 1;
1469 machine_info.max_cpus = num_cpus;
1470 machine_info.physical_cpu_max = num_cpus;
1471 machine_info.logical_cpu_max = num_cpus;
1472 }
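/*
 * Example of the rdar note above, with a hypothetical part: if the highest
 * CPU ID in the topology is 9 but only 8 logical CPUs are actually present,
 * max_cpus, physical_cpu_max and logical_cpu_max are all reported as 10
 * (max_cpu_id + 1), not 8.
 */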
1473
1474 void
1475 machine_init(void)
1476 {
1477 debug_log_init();
1478 clock_config();
1479 is_clock_configured = TRUE;
1480 if (debug_enabled) {
1481 pmap_map_globals();
1482 }
1483 ml_lockdown_init();
1484 }
1485