/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <kern/sched_hygiene.h>
#include <kern/startup.h>
#include <kern/monotonic.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <machine/monotonic.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <pexpert/arm64/apple_arm64_cpu.h>

#include <mach/machine.h>
#include <mach/machine/sdt.h>

#if !HAS_CONTINUOUS_HWCLOCK
extern uint64_t mach_absolutetime_asleep;
#else
extern uint64_t wake_abstime;
static uint64_t wake_conttime = UINT64_MAX;
#endif

extern volatile uint32_t debug_enabled;
extern _Atomic unsigned int cluster_type_num_active_cpus[MAX_CPU_TYPES];
const char *cluster_type_names[MAX_CPU_TYPES] = {
	[CLUSTER_TYPE_SMP] = "Standard",
	[CLUSTER_TYPE_P] = "Performance",
	[CLUSTER_TYPE_E] = "Efficiency",
};

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2

LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
uint32_t lockdown_done = 0;
boolean_t is_clock_configured = FALSE;


static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}

static void
sched_perfcontrol_thread_group_blocked_default(
    __unused thread_group_data_t blocked_tg, __unused thread_group_data_t blocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t blocked_thr_state)
{
}

static void
sched_perfcontrol_thread_group_unblocked_default(
    __unused thread_group_data_t unblocked_tg, __unused thread_group_data_t unblocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t unblocked_thr_state)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
sched_perfcontrol_thread_group_blocked_t sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
sched_perfcontrol_thread_group_unblocked_t sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
boolean_t sched_perfcontrol_thread_shared_rsrc_flags_enabled = false;

void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {
#if CONFIG_THREAD_GROUPS
		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_3) {
			if (callbacks->thread_group_init != NULL) {
				sched_perfcontrol_thread_group_init = callbacks->thread_group_init;
			} else {
				sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
			}
			if (callbacks->thread_group_deinit != NULL) {
				sched_perfcontrol_thread_group_deinit = callbacks->thread_group_deinit;
			} else {
				sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
			}
			// tell CLPC about existing thread groups
			thread_group_resync(TRUE);
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_6) {
			if (callbacks->thread_group_flags_update != NULL) {
				sched_perfcontrol_thread_group_flags_update = callbacks->thread_group_flags_update;
			} else {
				sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_8) {
			if (callbacks->thread_group_blocked != NULL) {
				sched_perfcontrol_thread_group_blocked = callbacks->thread_group_blocked;
			} else {
				sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
			}

			if (callbacks->thread_group_unblocked != NULL) {
				sched_perfcontrol_thread_group_unblocked = callbacks->thread_group_unblocked;
			} else {
				sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
			}
		}
#endif
		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_9) {
			sched_perfcontrol_thread_shared_rsrc_flags_enabled = true;
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
#if CONFIG_THREAD_GROUPS
		thread_group_resync(FALSE);
#endif
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
		sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
		sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
	}
}
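
/*
 * Illustrative sketch only (not part of the kernel sources): a performance
 * controller such as CLPC would typically register its callouts roughly as
 * below. The client-side callout functions and the client state structure
 * are hypothetical; the field names match the checks performed above.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version         = SCHED_PERFCONTROL_CALLBACKS_VERSION_9,
 *		.offcore         = my_offcore_callout,
 *		.oncore          = my_oncore_callout,
 *		.csw             = my_csw_callout,
 *		.state_update    = my_state_update_callout,
 *		.deadline_passed = my_deadline_callout,
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks,
 *	    sizeof(struct my_perfcontrol_state));
 *
 * Passing a NULL callbacks pointer takes the else branch above and resets
 * every callout to its default no-op implementation.
 */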


static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
#if CONFIG_THREAD_GROUPS
	struct thread_group *tg = thread_group_get(thread);
	data->thread_group_id = thread_group_get_id(tg);
	data->thread_group_data = thread_group_get_machine_data(tg);
#endif
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if CONFIG_CPU_COUNTERS
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* CONFIG_CPU_COUNTERS */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !CONFIG_CPU_COUNTERS */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if CONFIG_CPU_COUNTERS
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
	os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* CONFIG_CPU_COUNTERS */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
	       os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}
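
/*
 * Example (illustrative): with perfcontrol_callout_stats_enabled set, the
 * average number of cycles spent in the on-core callout can be read as
 *
 *	uint64_t avg_cycles =
 *	    perfcontrol_callout_stat_avg(PERFCONTROL_CALLOUT_ON_CORE,
 *	        PERFCONTROL_STAT_CYCLES);
 *
 * i.e. the total cycles accumulated by perfcontrol_callout_counters_end()
 * for that callout type divided by the number of completed callouts.
 */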



#if CONFIG_SCHED_EDGE

/*
 * The Edge scheduler allows the performance controller to update properties about the
 * threads as part of the callouts. These properties typically include shared cluster
 * resource usage. This allows the scheduler to manage specific threads within the
 * workload more optimally.
 */
static void
sched_perfcontrol_thread_flags_update(thread_t thread,
    struct perfcontrol_thread_data *thread_data,
    shared_rsrc_policy_agent_t agent)
{
	kern_return_t kr = KERN_SUCCESS;
	if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
		if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
			kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
		} else {
			kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
		}
	}
	if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
		if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
			kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
		} else {
			kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
		}
	}
	/*
	 * The thread_shared_rsrc_policy_* routines only fail if the performance controller is
	 * attempting to double set/clear a policy on the thread.
	 */
	assert(kr == KERN_SUCCESS);
}
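
/*
 * Illustrative sketch (hypothetical callout-side code, not part of this
 * file): a performance controller opts a thread into the round-robin
 * shared-resource policy from one of its callouts by setting both the mask
 * bit and the flag bit in the perfcontrol_thread_data it was handed:
 *
 *	data->thread_flags_mask |= PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR;
 *	data->thread_flags |= PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR;
 *
 * Setting only the mask bit while leaving the flag bit clear requests that
 * the policy be cleared instead, as handled above.
 */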

#endif /* CONFIG_SCHED_EDGE */

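/*
 * Context-switch callout from the scheduler: forwards the old/new thread
 * state to the registered context_switch and csw callouts, attributes the
 * energy estimate reported for the outgoing thread, and (on Edge systems)
 * applies any shared-resource policy flags the callout requested.
 */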
void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{

	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if CONFIG_CPU_COUNTERS
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* CONFIG_CPU_COUNTERS */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if CONFIG_CPU_COUNTERS
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* CONFIG_CPU_COUNTERS */

		recount_add_energy(old, get_threadtask(old),
		    offcore.energy_estimate_nj);

#if CONFIG_SCHED_EDGE
		if (sched_perfcontrol_thread_shared_rsrc_flags_enabled) {
			sched_perfcontrol_thread_flags_update(old, &offcore, SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
		}
#endif /* CONFIG_SCHED_EDGE */
	}
}

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{

	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if CONFIG_CPU_COUNTERS
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* CONFIG_CPU_COUNTERS */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if CONFIG_CPU_COUNTERS
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* CONFIG_CPU_COUNTERS */

#if CONFIG_PERVASIVE_ENERGY
	recount_add_energy(thread, get_threadtask(thread), data.energy_estimate_nj);
#endif /* CONFIG_PERVASIVE_ENERGY */

#if CONFIG_SCHED_EDGE
	if (sched_perfcontrol_thread_shared_rsrc_flags_enabled && (event == QUANTUM_EXPIRY)) {
		sched_perfcontrol_thread_flags_update(thread, &data, SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM);
	} else {
		assert(data.thread_flags_mask == 0);
	}
#endif /* CONFIG_SCHED_EDGE */
}

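/*
 * Invoked by the scheduler just before new_thread starts running: builds a
 * going_on_core descriptor (QoS, urgency, thread group, scheduling latency)
 * and hands it to the registered oncore callout.
 */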
void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = (uint16_t)proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = (uint16_t)urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = get_threadtask(new_thread) == kernel_task;
#if CONFIG_THREAD_GROUPS
	struct thread_group *tg = thread_group_get(new_thread);
	on_core.thread_group_id = thread_group_get_id(tg);
	on_core.thread_group_data = thread_group_get_machine_data(tg);
#endif
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if CONFIG_CPU_COUNTERS
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* CONFIG_CPU_COUNTERS */
	sched_perfcontrol_oncore(state, &on_core);
#if CONFIG_CPU_COUNTERS
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* CONFIG_CPU_COUNTERS */
}

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;
#if CONFIG_THREAD_GROUPS
	struct thread_group *tg = thread_group_get(old_thread);
	off_core.thread_group_id = thread_group_get_id(tg);
	off_core.thread_group_data = thread_group_get_machine_data(tg);
#endif

#if CONFIG_CPU_COUNTERS
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* CONFIG_CPU_COUNTERS */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if CONFIG_CPU_COUNTERS
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* CONFIG_CPU_COUNTERS */
}

#if CONFIG_THREAD_GROUPS
void
machine_thread_group_init(struct thread_group *tg)
{
	if (sched_perfcontrol_thread_group_init == sched_perfcontrol_thread_group_default) {
		return;
	}
	struct thread_group_data data;
	data.thread_group_id = thread_group_get_id(tg);
	data.thread_group_data = thread_group_get_machine_data(tg);
	data.thread_group_size = thread_group_machine_data_size();
	data.thread_group_flags = thread_group_get_flags(tg);
	sched_perfcontrol_thread_group_init(&data);
}

void
machine_thread_group_deinit(struct thread_group *tg)
{
	if (sched_perfcontrol_thread_group_deinit == sched_perfcontrol_thread_group_default) {
		return;
	}
	struct thread_group_data data;
	data.thread_group_id = thread_group_get_id(tg);
	data.thread_group_data = thread_group_get_machine_data(tg);
	data.thread_group_size = thread_group_machine_data_size();
	data.thread_group_flags = thread_group_get_flags(tg);
	sched_perfcontrol_thread_group_deinit(&data);
}

void
machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags)
{
	if (sched_perfcontrol_thread_group_flags_update == sched_perfcontrol_thread_group_default) {
		return;
	}
	struct thread_group_data data;
	data.thread_group_id = thread_group_get_id(tg);
	data.thread_group_data = thread_group_get_machine_data(tg);
	data.thread_group_size = thread_group_machine_data_size();
	data.thread_group_flags = flags;
	sched_perfcontrol_thread_group_flags_update(&data);
}

void
machine_thread_group_blocked(struct thread_group *blocked_tg,
    struct thread_group *blocking_tg,
    uint32_t flags,
    thread_t blocked_thread)
{
	if (sched_perfcontrol_thread_group_blocked == sched_perfcontrol_thread_group_blocked_default) {
		return;
	}

	spl_t s = splsched();

	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(blocked_thread);
	struct thread_group_data blocked_data;
	assert(blocked_tg != NULL);

	blocked_data.thread_group_id = thread_group_get_id(blocked_tg);
	blocked_data.thread_group_data = thread_group_get_machine_data(blocked_tg);
	blocked_data.thread_group_size = thread_group_machine_data_size();

	if (blocking_tg == NULL) {
		/*
		 * For special cases such as the render server, the blocking TG is a
		 * well known TG. Only in that case, the blocking_tg should be NULL.
		 */
		assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
		sched_perfcontrol_thread_group_blocked(&blocked_data, NULL, flags, state);
	} else {
		struct thread_group_data blocking_data;
		blocking_data.thread_group_id = thread_group_get_id(blocking_tg);
		blocking_data.thread_group_data = thread_group_get_machine_data(blocking_tg);
		blocking_data.thread_group_size = thread_group_machine_data_size();
		sched_perfcontrol_thread_group_blocked(&blocked_data, &blocking_data, flags, state);
	}
	KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_START,
	    thread_tid(blocked_thread), thread_group_get_id(blocked_tg),
	    blocking_tg ? thread_group_get_id(blocking_tg) : THREAD_GROUP_INVALID,
	    flags);

	splx(s);
}

void
machine_thread_group_unblocked(struct thread_group *unblocked_tg,
    struct thread_group *unblocking_tg,
    uint32_t flags,
    thread_t unblocked_thread)
{
	if (sched_perfcontrol_thread_group_unblocked == sched_perfcontrol_thread_group_unblocked_default) {
		return;
	}

	spl_t s = splsched();

	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(unblocked_thread);
	struct thread_group_data unblocked_data;
	assert(unblocked_tg != NULL);

	unblocked_data.thread_group_id = thread_group_get_id(unblocked_tg);
	unblocked_data.thread_group_data = thread_group_get_machine_data(unblocked_tg);
	unblocked_data.thread_group_size = thread_group_machine_data_size();

	if (unblocking_tg == NULL) {
		/*
		 * For special cases such as the render server, the unblocking TG is a
		 * well known TG. Only in that case, the unblocking_tg should be NULL.
		 */
		assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
		sched_perfcontrol_thread_group_unblocked(&unblocked_data, NULL, flags, state);
	} else {
		struct thread_group_data unblocking_data;
		unblocking_data.thread_group_id = thread_group_get_id(unblocking_tg);
		unblocking_data.thread_group_data = thread_group_get_machine_data(unblocking_tg);
		unblocking_data.thread_group_size = thread_group_machine_data_size();
		sched_perfcontrol_thread_group_unblocked(&unblocked_data, &unblocking_data, flags, state);
	}
	KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_END,
	    thread_tid(unblocked_thread), thread_group_get_id(unblocked_tg),
	    unblocking_tg ? thread_group_get_id(unblocking_tg) : THREAD_GROUP_INVALID,
	    flags);

	splx(s);
}

#endif /* CONFIG_THREAD_GROUPS */

void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id = thread->thread_id,
		.qos_class = (uint16_t)proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency = kwi_args->urgency,
		.flags = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start = kwi_args->start,
		.finish = kwi_args->finish,
		.deadline = kwi_args->deadline,
		.next_start = kwi_args->next_start,
		.create_flags = kwi_args->create_flags,
	};
#if CONFIG_THREAD_GROUPS
	struct thread_group *tg;
	tg = thread_group_get(thread);
	work_interval.thread_group_id = thread_group_get_id(tg);
	work_interval.thread_group_data = thread_group_get_machine_data(tg);
#endif
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}


void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if SCHED_HYGIENE_DEBUG

__options_decl(int_mask_hygiene_flags_t, uint8_t, {
	INT_MASK_BASE = 0x00,
	INT_MASK_FROM_HANDLER = 0x01,
	INT_MASK_IS_STACKSHOT = 0x02,
});

/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 *
 * Call ml_get_timebase() directly to avoid the extra overhead that is
 * enabled on newer platforms in DEVELOPMENT kernel configurations.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	if (thread->machine.intmask_timestamp) {
		thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
		INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
	}
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp and cycle/instruction counts on a thread that
 * has been unscheduled to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
	thread->machine.intmask_cycles = 0;
	thread->machine.intmask_instr = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self(void)
{
	ml_spin_debug_clear(current_thread());
}

#ifndef KASAN

/*
 * Get a character representing the provided thread's kind of CPU.
 */
#if !CONFIG_CPU_COUNTERS
__unused
#endif // !CONFIG_CPU_COUNTERS
static char
__ml_interrupts_disabled_cpu_kind(thread_t thread)
{
#if __AMP__
	processor_t processor = thread->last_processor;
	if (!processor) {
		return '!';
	}
	switch (processor->processor_set->pset_cluster_type) {
	case PSET_AMP_P:
		return 'P';
	case PSET_AMP_E:
		return 'E';
	default:
		return '?';
	}
#else // __AMP__
#pragma unused(thread)
	return '-';
#endif // !__AMP__
}

#define EXTRA_INFO_STRING_SIZE 256
#define LOW_FREQ_THRESHOLD_MHZ 500
#define HIGH_CPI_THRESHOLD 3

static void
__ml_trigger_interrupts_disabled_handle(thread_t thread, uint64_t start, uint64_t now, uint64_t timeout, int_mask_hygiene_flags_t flags)
{
	mach_timebase_info_data_t timebase;
	clock_timebase_info(&timebase);
	bool is_int_handler = flags & INT_MASK_FROM_HANDLER;
	bool is_stackshot = flags & INT_MASK_IS_STACKSHOT;

	const uint64_t time_elapsed = now - start;
	const uint64_t time_elapsed_ns = (time_elapsed * timebase.numer) / timebase.denom;

	uint64_t current_cycles = 0, current_instrs = 0;

#if CONFIG_CPU_COUNTERS
	if (sched_hygiene_debug_pmc) {
		mt_cur_cpu_cycles_instrs_speculative(&current_cycles, &current_instrs);
	}
#endif // CONFIG_CPU_COUNTERS

	const uint64_t cycles_elapsed = current_cycles - thread->machine.intmask_cycles;
	const uint64_t instrs_elapsed = current_instrs - thread->machine.intmask_instr;

	if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
		const uint64_t timeout_ns = ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom;
		char extra_info_string[EXTRA_INFO_STRING_SIZE] = { '\0' };
#if CONFIG_CPU_COUNTERS
		if (sched_hygiene_debug_pmc) {
			const uint64_t time_elapsed_us = time_elapsed_ns / 1000;
			const uint64_t average_freq_mhz = cycles_elapsed / time_elapsed_us;
			const uint64_t average_cpi_whole = cycles_elapsed / instrs_elapsed;
			const uint64_t average_cpi_fractional = ((cycles_elapsed * 100) / instrs_elapsed) % 100;
			bool high_cpi = average_cpi_whole >= HIGH_CPI_THRESHOLD;
			char core_kind = __ml_interrupts_disabled_cpu_kind(thread);
			bool low_mhz = average_freq_mhz < LOW_FREQ_THRESHOLD_MHZ;

			snprintf(extra_info_string, EXTRA_INFO_STRING_SIZE,
			    ", %sfreq = %llu MHz, %sCPI = %llu.%llu, CPU kind = %c",
			    low_mhz ? "low " : "",
			    average_freq_mhz,
			    high_cpi ? "high " : "",
			    average_cpi_whole,
			    average_cpi_fractional,
			    core_kind);
		}
#endif // CONFIG_CPU_COUNTERS

		if (is_int_handler) {
			panic("Processing of an interrupt (type = %u, handler address = %p, vector = %p) "
			    "took %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
			    thread->machine.int_type, (void *)thread->machine.int_handler_addr, (void *)thread->machine.int_vector,
			    time_elapsed_ns, start, now, timeout_ns, extra_info_string);
		} else {
			panic("%s for %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
			    is_stackshot ? "Stackshot disabled interrupts" : "Interrupts held disabled",
			    time_elapsed_ns, start, now, timeout_ns, extra_info_string);
		}
	} else if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_TRACE) {
		if (is_int_handler) {
			static const uint32_t interrupt_handled_dbgid =
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_HANDLED_EXPIRED);
			DTRACE_SCHED3(interrupt_handled_dbgid, uint64_t, time_elapsed,
			    uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
			KDBG(interrupt_handled_dbgid, time_elapsed,
			    cycles_elapsed, instrs_elapsed);
		} else {
			static const uint32_t interrupt_masked_dbgid =
			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_MASKED_EXPIRED);
			DTRACE_SCHED3(interrupt_masked_dbgid, uint64_t, time_elapsed,
			    uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
			KDBG(interrupt_masked_dbgid, time_elapsed,
			    cycles_elapsed, instrs_elapsed);
		}
	}
}
#endif // !defined(KASAN)

static inline void
__ml_handle_interrupts_disabled_duration(thread_t thread, uint64_t timeout, bool is_int_handler)
{
	if (timeout == 0) {
		return; // 0 means timeout disabled.
	}
	uint64_t start = is_int_handler ? thread->machine.inthandler_timestamp : thread->machine.intmask_timestamp;
	if (start != 0) {
		uint64_t now = ml_get_sched_hygiene_timebase();

		if (interrupt_masked_debug_mode &&
		    ((now - start) > timeout * debug_cpu_performance_degradation_factor) &&
		    !thread->machine.inthandler_abandon) {
			/*
			 * Disable the actual panic for KASAN due to the overhead of KASAN itself,
			 * but leave the rest of the mechanism enabled so that KASAN can catch any
			 * bugs in the mechanism itself.
			 */
#ifndef KASAN
			__ml_trigger_interrupts_disabled_handle(thread, start, now, timeout, is_int_handler);
#endif
		}

		if (is_int_handler) {
			uint64_t const duration = now - start;
			/*
			 * No need for an atomic add, the only thread modifying
			 * this is ourselves. Other threads querying will just see
			 * either the old or the new value. (This will also just
			 * resolve to regular loads and stores on relevant
			 * platforms.)
			 */
			uint64_t const old_duration = os_atomic_load_wide(&thread->machine.int_time_mt, relaxed);
			os_atomic_store_wide(&thread->machine.int_time_mt, old_duration + duration, relaxed);
		}
	}
}

void
ml_handle_interrupts_disabled_duration(thread_t thread)
{
	__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_BASE);
}

void
ml_handle_stackshot_interrupt_disabled_duration(thread_t thread)
{
	/* Use MAX() to let the user bump the timeout further if needed */
	uint64_t stackshot_timeout = os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed);
	uint64_t normal_timeout = os_atomic_load(&interrupt_masked_timeout, relaxed);
	uint64_t timeout = MAX(stackshot_timeout, normal_timeout);
	__ml_handle_interrupts_disabled_duration(thread, timeout, INT_MASK_IS_STACKSHOT);
}

void
ml_handle_interrupt_handler_duration(thread_t thread)
{
	__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_FROM_HANDLER);
}

void
ml_irq_debug_start(uintptr_t handler, uintptr_t vector)
{
	INTERRUPT_MASKED_DEBUG_START(handler, DBG_INTR_TYPE_OTHER);
	current_thread()->machine.int_vector = (uintptr_t)VM_KERNEL_STRIP_PTR(vector);
}

void
ml_irq_debug_end()
{
	INTERRUPT_MASKED_DEBUG_END();
}

/*
 * Abandon a potential timeout when handling an interrupt. It is important to
 * continue to keep track of the interrupt time so the time-stamp can't be
 * reset. (Interrupt time is subtracted from preemption time to maintain
 * accurate preemption time measurement).
 * When `inthandler_abandon` is true, a timeout will be ignored when the
 * interrupt handler finishes.
 */
void
ml_irq_debug_abandon(void)
{
	assert(!ml_get_interrupts_enabled());

	thread_t t = current_thread();
	if (t->machine.inthandler_timestamp != 0) {
		t->machine.inthandler_abandon = true;
	}
}
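
/*
 * Illustrative usage (hypothetical caller, not part of this file): an
 * externally dispatched interrupt handler can bracket its work so the
 * hygiene checks above still apply, and may abandon the timeout when a
 * long-running operation is expected:
 *
 *	ml_irq_debug_start((uintptr_t)my_handler, (uintptr_t)my_vector);
 *	// ... service the interrupt ...
 *	if (expected_to_run_long) {
 *		ml_irq_debug_abandon();
 *	}
 *	ml_irq_debug_end();
 *
 * my_handler, my_vector and expected_to_run_long are placeholders.
 */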
#endif // SCHED_HYGIENE_DEBUG

#if SCHED_HYGIENE_DEBUG
__attribute__((noinline))
static void
ml_interrupt_masked_debug_timestamp(thread_t thread)
{
	thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
	INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
}
#endif

boolean_t
ml_set_interrupts_enabled_with_debug(boolean_t enable, boolean_t __unused debug)
{
	thread_t thread;
	uint64_t state;

	thread = current_thread();

	state = __builtin_arm_rsr("DAIF");

	if (__improbable(!(state & DAIF_DEBUGF))) {
		panic("%s: debug exceptions enabled in kernel mode", __func__);
	}
	if (enable && (state & DAIF_STANDARD_DISABLE)) {
		assert3u(state & DAIF_STANDARD_DISABLE, ==, DAIF_STANDARD_DISABLE);
		assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if SCHED_HYGIENE_DEBUG
		if (__probable(debug && (interrupt_masked_debug_mode || sched_preemption_disable_debug_mode))) {
			// Interrupts are currently masked, we will enable them (after finishing this check)
			if (stackshot_active()) {
				ml_handle_stackshot_interrupt_disabled_duration(thread);
			} else {
				ml_handle_interrupts_disabled_duration(thread);
			}
			thread->machine.intmask_timestamp = 0;
			thread->machine.intmask_cycles = 0;
			thread->machine.intmask_instr = 0;
		}
#endif // SCHED_HYGIENE_DEBUG
		if (get_preemption_level() == 0) {
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
		__builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE);
	} else if (!enable && ((state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE)) {
		assert3u(state & DAIF_STANDARD_DISABLE, ==, 0);
		__builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE);

#if SCHED_HYGIENE_DEBUG
		if (__probable(debug && (interrupt_masked_debug_mode || sched_preemption_disable_debug_mode))) {
			// Interrupts were enabled, we just masked them
			ml_interrupt_masked_debug_timestamp(thread);
		}
#endif
	}
	return (state & DAIF_STANDARD_DISABLE) != DAIF_STANDARD_DISABLE;
}

boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled_with_debug(enable, true);
}

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Interrupt enable function exported for AppleCLPC without
 * measurements enabled.
 *
 * Only for AppleCLPC!
 */
boolean_t
sched_perfcontrol_ml_set_interrupts_without_measurement(boolean_t enable)
{
	return ml_set_interrupts_enabled_with_debug(enable, false);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state. We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive. But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

/*
 * This answers the question
 * "after returning from this interrupt handler with the AST_URGENT bit set,
 * will I end up in ast_taken_user or ast_taken_kernel?"
 *
 * If it's called in non-interrupt context (e.g. regular syscall), it should
 * return false.
 *
 * Must be called with interrupts disabled.
 */
bool
ml_did_interrupt_userspace(void)
{
	assert(ml_get_interrupts_enabled() == false);

	struct arm_saved_state *state = getCpuDatap()->cpu_int_state;

	return state && PSR64_IS_USER(get_saved_state_cpsr(state));
}


vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}

static boolean_t ml_quiescing = FALSE;

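/*
 * ml_quiescing is published with a release fence and read with an acquire
 * fence so that whatever state was set up before the flag changed is
 * visible to readers that observe the new value.
 */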
void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(ml_quiescing != quiescing);
	ml_quiescing = quiescing;
	os_atomic_thread_fence(release);
}

boolean_t
ml_is_quiescing(void)
{
	os_atomic_thread_fence(acquire);
	return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
#if CONFIG_SPTM
	extern uint64_t memSize;
#endif /* CONFIG_SPTM */
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
#if CONFIG_SPTM
		/*
		 * SPTM systems cache [memSize] in a CTRR-protected variable rather
		 * than relying on [BootArgs]. This is to enable the possibility
		 * for XNU to modify it before machine lockdown, which happens in
		 * KASAN kernels. If we did not do this, XNU would fault on the first
		 * attempt to overwrite [BootArgs->memSize].
		 */
		size = memSize;
#else
		size = BootArgs->memSize;
#endif /* CONFIG_SPTM */
		if (size < (2 * roundsize)) {
			roundsize >>= 1;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
	}

#if CONFIG_SPTM
	size -= memSize;
#else
	size -= BootArgs->memSize;
#endif /* CONFIG_SPTM */

	return size;
}

uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
	return hwclock_conttime_offset;
#elif HAS_CONTINUOUS_HWCLOCK
	return 0;
#else
	return rtclock_base_abstime + mach_absolutetime_asleep;
#endif
}

uint64_t
ml_get_time_since_reset(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	if (wake_conttime == UINT64_MAX) {
		return UINT64_MAX;
	} else {
		return mach_continuous_time() - wake_conttime;
	}
#else
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
#endif
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
#if HAS_CONTINUOUS_HWCLOCK
	wake_conttime = wake_time;
#endif
}

uint64_t
ml_get_conttime_wake_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	/*
	 * For now, we will reconstitute the timebase value from
	 * cpu_timebase_init and use it as the wake time.
	 */
	return wake_abstime - ml_get_abstime_offset();
#else /* HAS_CONTINUOUS_HWCLOCK */
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
#endif /* HAS_CONTINUOUS_HWCLOCK */
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;
	const unsigned int max_cpu_id = ml_get_max_cpu_number();

	for (cur_cpu_num = 0; cur_cpu_num <= max_cpu_id; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
				return true;
			}
		}
	}

	return false;
}
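
/*
 * Illustrative usage note (hypothetical caller): because the scan above is
 * unsynchronized, a true result only means the thread was observed as the
 * active thread of some CPU at the time of the scan; it may already have
 * gone off-core by the time the caller acts on the result, so treat it as
 * a hint only, e.g. for diagnostics or heuristics.
 */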

int
ml_early_cpu_max_number(void)
{
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
	return ml_get_max_cpu_number();
}

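/*
 * ml_set_max_cpus() marks the CPU count as final and wakes any thread parked
 * in ml_wait_max_cpus(), which then returns machine_info.max_cpus; the
 * MAX_CPUS_SET / MAX_CPUS_WAIT states implement that handshake under
 * max_cpus_lock.
 */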
void
ml_set_max_cpus(unsigned int max_cpus __unused)
{
	lck_mtx_lock(&max_cpus_lock);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus_initialized == MAX_CPUS_WAIT) {
			thread_wakeup((event_t) &max_cpus_initialized);
		}
		max_cpus_initialized = MAX_CPUS_SET;
	}
	lck_mtx_unlock(&max_cpus_lock);
}

unsigned int
ml_wait_max_cpus(void)
{
	assert(lockdown_done);
	lck_mtx_lock(&max_cpus_lock);
	while (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
	}
	lck_mtx_unlock(&max_cpus_lock);
	return machine_info.max_cpus;
}

void
ml_cpu_get_info_type(ml_cpu_info_t * ml_cpu_info, cluster_type_t cluster_type)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info_type(cluster_type);
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 8)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
#error Unsupported arch
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

/*
 * Routine: ml_cpu_get_info
 * Function: Fill out the ml_cpu_info_t structure with parameters associated
 * with the boot cluster.
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	ml_cpu_get_info_type(ml_cpu_info, ml_get_topology_info()->boot_cpu->cluster_type);
}

unsigned int
ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available)
{
	/*
	 * At present no supported ARM system features SMT, so the "logical"
	 * parameter doesn't have an impact on the result.
	 */
	if (logical && available) {
		return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
	} else if (logical && !available) {
		return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
	} else if (!logical && available) {
		return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
	} else {
		return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
	}
}

void
ml_get_cluster_type_name(cluster_type_t cluster_type, char *name, size_t name_size)
{
	strlcpy(name, cluster_type_names[cluster_type], name_size);
}

unsigned int
ml_get_cluster_number_type(cluster_type_t cluster_type)
{
	return ml_get_topology_info()->cluster_type_num_clusters[cluster_type];
}

unsigned int
ml_cpu_cache_sharing(unsigned int level, cluster_type_t cluster_type, bool include_all_cpu_types __unused)
{
	unsigned int cpu_number = 0, cluster_types = 0;

	/*
	 * Level 0 corresponds to main memory, which is shared across all cores.
	 */
	if (level == 0) {
		return ml_get_topology_info()->num_cpus;
	}

	/*
	 * At present no supported ARM system features more than 2 levels of caches.
	 */
	if (level > 2) {
		return 0;
	}

	/*
	 * L1 caches are always per core.
	 */
	if (level == 1) {
		return 1;
	}

	cluster_types = (1 << cluster_type);

	/*
	 * Traverse clusters until we find the one(s) of the desired type(s).
	 */
	for (int i = 0; i < ml_get_topology_info()->num_clusters; i++) {
		ml_topology_cluster_t *cluster = &ml_get_topology_info()->clusters[i];
		if ((1 << cluster->cluster_type) & cluster_types) {
			cpu_number += cluster->num_cpus;
			cluster_types &= ~(1 << cluster->cluster_type);
			if (!cluster_types) {
				break;
			}
		}
	}

	return cpu_number;
}
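
/*
 * Example (illustrative): ml_cpu_cache_sharing(2, CLUSTER_TYPE_P, false)
 * returns the number of CPUs in a single performance cluster, i.e. the set
 * of cores sharing one L2, while level 1 always returns 1 (per-core caches)
 * and level 0 returns the total CPU count (main memory).
 */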

unsigned int
ml_get_cpu_types(void)
{
	return ml_get_topology_info()->cluster_types;
}

void
machine_conf(void)
{
	/*
	 * This is known to be inaccurate. mem_size should always be capped at 2 GB
	 */
	machine_info.memory_size = (uint32_t)mem_size;

	// rdar://problem/58285685: Userland expects _COMM_PAGE_LOGICAL_CPUS to report
	// (max_cpu_id+1) rather than a literal *count* of logical CPUs.
	unsigned int num_cpus = ml_get_topology_info()->max_cpu_id + 1;
	machine_info.max_cpus = num_cpus;
	machine_info.physical_cpu_max = num_cpus;
	machine_info.logical_cpu_max = num_cpus;
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled) {
		pmap_map_globals();
	}
	ml_lockdown_init();
}