1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/machine_cpu.h>
30 #include <arm/cpu_internal.h>
31 #include <arm/cpuid.h>
32 #include <arm/cpuid_internal.h>
33 #include <arm/cpu_data.h>
34 #include <arm/cpu_data_internal.h>
35 #include <arm/misc_protos.h>
36 #include <arm/machdep_call.h>
37 #include <arm/machine_routines.h>
38 #include <arm/rtclock.h>
39 #include <kern/machine.h>
40 #include <kern/thread.h>
41 #include <kern/thread_group.h>
42 #include <kern/policy_internal.h>
43 #include <kern/sched_hygiene.h>
44 #include <kern/startup.h>
45 #include <machine/config.h>
46 #include <machine/atomic.h>
47 #include <pexpert/pexpert.h>
48 #include <pexpert/device_tree.h>
49
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54
55 #include <mach/machine.h>
56 #include <mach/machine/sdt.h>
57
58 #if !HAS_CONTINUOUS_HWCLOCK
59 extern uint64_t mach_absolutetime_asleep;
60 #else
61 extern uint64_t wake_abstime;
62 static uint64_t wake_conttime = UINT64_MAX;
63 #endif
64
65 extern volatile uint32_t debug_enabled;
66 extern _Atomic unsigned int cluster_type_num_active_cpus[MAX_CPU_TYPES];
67 const char *cluster_type_names[MAX_CPU_TYPES] = {
68 [CLUSTER_TYPE_SMP] = "Standard",
69 [CLUSTER_TYPE_P] = "Performance",
70 [CLUSTER_TYPE_E] = "Efficiency",
71 };
72
73 static int max_cpus_initialized = 0;
74 #define MAX_CPUS_SET 0x1
75 #define MAX_CPUS_WAIT 0x2
76
77 LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
78 LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
79 uint32_t lockdown_done = 0;
80 boolean_t is_clock_configured = FALSE;
81
82
83 static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
85 {
86 }
87
88 static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
90 {
91 }
92
93 static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
95 {
96 }
97
98 static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
100 {
101 }
102
103 static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
105 {
106 }
107
108 static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
111 {
112 }
113
114 static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
117 {
118 }
119
120 static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
122 {
123 }
124
125 static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
131 {
132 }
133
134 static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
139 {
140 }
141
142 static void
sched_perfcontrol_thread_group_blocked_default(
    __unused thread_group_data_t blocked_tg, __unused thread_group_data_t blocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t blocked_thr_state)
146 {
147 }
148
149 static void
sched_perfcontrol_thread_group_unblocked_default(
    __unused thread_group_data_t unblocked_tg, __unused thread_group_data_t unblocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t unblocked_thr_state)
153 {
154 }
155
156 sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
157 sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
158 sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
159 sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
160 sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
161 sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
162 sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
163 sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
164 sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
165 sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
166 sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
167 sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
168 sched_perfcontrol_thread_group_blocked_t sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
169 sched_perfcontrol_thread_group_unblocked_t sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
170 boolean_t sched_perfcontrol_thread_shared_rsrc_flags_enabled = false;
171
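/*
 * sched_perfcontrol_register_callbacks()
 * Install the performance controller's callout table, or reset every callout
 * to its no-op default when callbacks is NULL. Each callout is only honored
 * when the caller's version is new enough for it, and thread_group_resync()
 * is used to tell CLPC about existing thread groups.
 */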
172 void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
174 {
175 assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);
176
177 if (size_of_state > sizeof(struct perfcontrol_state)) {
178 panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
179 }
180
181 if (callbacks) {
182 #if CONFIG_THREAD_GROUPS
183 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_3) {
184 if (callbacks->thread_group_init != NULL) {
185 sched_perfcontrol_thread_group_init = callbacks->thread_group_init;
186 } else {
187 sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
188 }
189 if (callbacks->thread_group_deinit != NULL) {
190 sched_perfcontrol_thread_group_deinit = callbacks->thread_group_deinit;
191 } else {
192 sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
193 }
194 // tell CLPC about existing thread groups
195 thread_group_resync(TRUE);
196 }
197
198 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_6) {
199 if (callbacks->thread_group_flags_update != NULL) {
200 sched_perfcontrol_thread_group_flags_update = callbacks->thread_group_flags_update;
201 } else {
202 sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
203 }
204 }
205
206 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_8) {
207 if (callbacks->thread_group_blocked != NULL) {
208 sched_perfcontrol_thread_group_blocked = callbacks->thread_group_blocked;
209 } else {
210 sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
211 }
212
213 if (callbacks->thread_group_unblocked != NULL) {
214 sched_perfcontrol_thread_group_unblocked = callbacks->thread_group_unblocked;
215 } else {
216 sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
217 }
218 }
219 #endif
220 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_9) {
221 sched_perfcontrol_thread_shared_rsrc_flags_enabled = true;
222 }
223
224 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
225 if (callbacks->work_interval_ctl != NULL) {
226 sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
227 } else {
228 sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
229 }
230 }
231
232 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
233 if (callbacks->csw != NULL) {
234 sched_perfcontrol_csw = callbacks->csw;
235 } else {
236 sched_perfcontrol_csw = sched_perfcontrol_csw_default;
237 }
238
239 if (callbacks->state_update != NULL) {
240 sched_perfcontrol_state_update = callbacks->state_update;
241 } else {
242 sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
243 }
244 }
245
246 if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
247 if (callbacks->deadline_passed != NULL) {
248 sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
249 } else {
250 sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
251 }
252 }
253
254 if (callbacks->offcore != NULL) {
255 sched_perfcontrol_offcore = callbacks->offcore;
256 } else {
257 sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
258 }
259
260 if (callbacks->context_switch != NULL) {
261 sched_perfcontrol_switch = callbacks->context_switch;
262 } else {
263 sched_perfcontrol_switch = sched_perfcontrol_switch_default;
264 }
265
266 if (callbacks->oncore != NULL) {
267 sched_perfcontrol_oncore = callbacks->oncore;
268 } else {
269 sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
270 }
271
272 if (callbacks->max_runnable_latency != NULL) {
273 sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
274 } else {
275 sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
276 }
277
278 if (callbacks->work_interval_notify != NULL) {
279 sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
280 } else {
281 sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
282 }
283 } else {
284 /* reset to defaults */
285 #if CONFIG_THREAD_GROUPS
286 thread_group_resync(FALSE);
287 #endif
288 sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
289 sched_perfcontrol_switch = sched_perfcontrol_switch_default;
290 sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
291 sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
292 sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
293 sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
294 sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
295 sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
296 sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
297 sched_perfcontrol_csw = sched_perfcontrol_csw_default;
298 sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
299 sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
300 sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
301 }
302 }
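/*
 * Illustrative registration sketch (hypothetical caller, not part of this
 * file): a performance controller would typically fill in a callbacks table
 * once at load time and register it, e.g.
 *
 *     static struct sched_perfcontrol_callbacks callbacks = {
 *         .version        = SCHED_PERFCONTROL_CALLBACKS_VERSION_9,
 *         .oncore         = my_oncore,
 *         .offcore        = my_offcore,
 *         .context_switch = my_context_switch,
 *     };
 *     sched_perfcontrol_register_callbacks(&callbacks, sizeof(my_state_t));
 *
 * The state size must not exceed sizeof(struct perfcontrol_state), and
 * passing NULL later restores the defaults. The my_* names above are
 * placeholders, not real symbols.
 */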
303
304
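/*
 * Fill out the per-thread data handed to CLPC callouts: perfcontrol class,
 * thread and thread-group identifiers, the scheduling latency observed at the
 * same base priority, and the opaque perfcontrol state. The energy estimate
 * starts at zero and may be filled in by the callout.
 */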
305 static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
309 {
310 bzero(data, sizeof(struct perfcontrol_thread_data));
311 data->perfctl_class = thread_get_perfcontrol_class(thread);
312 data->energy_estimate_nj = 0;
313 data->thread_id = thread->thread_id;
314 #if CONFIG_THREAD_GROUPS
315 struct thread_group *tg = thread_group_get(thread);
316 data->thread_group_id = thread_group_get_id(tg);
317 data->thread_group_data = thread_group_get_machine_data(tg);
318 #endif
319 data->scheduling_latency_at_same_basepri = same_pri_latency;
320 data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
321 }
322
323 static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
325 {
326 #if MONOTONIC
327 mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
328 #else /* MONOTONIC */
329 cpu_counters->instructions = 0;
330 cpu_counters->cycles = 0;
331 #endif /* !MONOTONIC */
332 }
333
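/*
 * Optional self-instrumentation for the callouts: when
 * perfcontrol_callout_stats_enabled is set, fixed cycle/instruction counters
 * are sampled around each callout and accumulated per callout type.
 */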
334 int perfcontrol_callout_stats_enabled = 0;
335 static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
336 static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];
337
338 #if MONOTONIC
339 static inline
340 bool
perfcontrol_callout_counters_begin(uint64_t *counters)
342 {
343 if (!perfcontrol_callout_stats_enabled) {
344 return false;
345 }
346 mt_fixed_counts(counters);
347 return true;
348 }
349
350 static inline
351 void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
354 {
355 uint64_t end_counters[MT_CORE_NFIXED];
356 mt_fixed_counts(end_counters);
357 os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
358 end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
359 os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
360 end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
361 os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
362 }
363 #endif /* MONOTONIC */
364
365 uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
368 {
369 if (!perfcontrol_callout_stats_enabled) {
370 return 0;
371 }
372 return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
373 os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
374 }
375
376
377
378 #if CONFIG_SCHED_EDGE
379
380 /*
381 * The Edge scheduler allows the performance controller to update properties about the
382 * threads as part of the callouts. These properties typically include shared cluster
383 * resource usage. This allows the scheduler to manage specific threads within the
384 * workload more optimally.
385 */
386 static void
sched_perfcontrol_thread_flags_update(thread_t thread,
    struct perfcontrol_thread_data *thread_data,
    shared_rsrc_policy_agent_t agent)
390 {
391 kern_return_t kr = KERN_SUCCESS;
392 if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
393 if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
394 kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
395 } else {
396 kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
397 }
398 }
399 if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
400 if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
401 kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
402 } else {
403 kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
404 }
405 }
406 /*
407 * The thread_shared_rsrc_policy_* routines only fail if the performance controller is
408 * attempting to double set/clear a policy on the thread.
409 */
410 assert(kr == KERN_SUCCESS);
411 }
412
413 #endif /* CONFIG_SCHED_EDGE */
414
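/*
 * Context-switch callout: lets the performance controller observe the
 * off-going and on-coming threads along with per-CPU cycle and instruction
 * counts gathered for the switch.
 */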
415 void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
422 {
423
424 if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
425 perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
426 perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
427 sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
428 }
429
430 if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
431 uint32_t cpu_id = (uint32_t)cpu_number();
432 struct perfcontrol_cpu_counters cpu_counters;
433 struct perfcontrol_thread_data offcore, oncore;
434 machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
435 machine_switch_populate_perfcontrol_thread_data(&oncore, new,
436 new_thread_same_pri_latency);
437 machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);
438
439 #if MONOTONIC
440 uint64_t counters[MT_CORE_NFIXED];
441 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
442 #endif /* MONOTONIC */
443 sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
444 &offcore, &oncore, &cpu_counters, NULL);
445 #if MONOTONIC
446 if (ctrs_enabled) {
447 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
448 }
449 #endif /* MONOTONIC */
450
451 recount_add_energy(old, get_threadtask(old),
452 offcore.energy_estimate_nj);
453
454 #if CONFIG_SCHED_EDGE
455 if (sched_perfcontrol_thread_shared_rsrc_flags_enabled) {
456 sched_perfcontrol_thread_flags_update(old, &offcore, SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
457 }
458 #endif /* CONFIG_SCHED_EDGE */
459 }
460 }
461
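/*
 * Per-thread state-update callout, invoked at events such as quantum expiry.
 */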
462 void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
467 {
468
469 if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
470 return;
471 }
472 uint32_t cpu_id = (uint32_t)cpu_number();
473 struct perfcontrol_thread_data data;
474 machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);
475
476 #if MONOTONIC
477 uint64_t counters[MT_CORE_NFIXED];
478 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
479 #endif /* MONOTONIC */
480 sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
481 &data, NULL);
482 #if MONOTONIC
483 if (ctrs_enabled) {
484 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
485 }
486 #endif /* MONOTONIC */
487
488 #if CONFIG_PERVASIVE_ENERGY
489 recount_add_energy(thread, get_threadtask(thread), data.energy_estimate_nj);
490 #endif /* CONFIG_PERVASIVE_ENERGY */
491
492 #if CONFIG_SCHED_EDGE
493 if (sched_perfcontrol_thread_shared_rsrc_flags_enabled && (event == QUANTUM_EXPIRY)) {
494 sched_perfcontrol_thread_flags_update(thread, &data, SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM);
495 } else {
496 assert(data.thread_flags_mask == 0);
497 }
498 #endif /* CONFIG_SCHED_EDGE */
499 }
500
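/*
 * Notify the performance controller that a thread is about to go on core,
 * including its QoS class, urgency, and observed scheduling latencies.
 */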
501 void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
507 {
508 if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
509 return;
510 }
511 struct going_on_core on_core;
512 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);
513
514 on_core.thread_id = new_thread->thread_id;
515 on_core.energy_estimate_nj = 0;
516 on_core.qos_class = (uint16_t)proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
517 on_core.urgency = (uint16_t)urgency;
518 on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
519 on_core.is_kernel_thread = get_threadtask(new_thread) == kernel_task;
520 #if CONFIG_THREAD_GROUPS
521 struct thread_group *tg = thread_group_get(new_thread);
522 on_core.thread_group_id = thread_group_get_id(tg);
523 on_core.thread_group_data = thread_group_get_machine_data(tg);
524 #endif
525 on_core.scheduling_latency = sched_latency;
526 on_core.start_time = timestamp;
527 on_core.scheduling_latency_at_same_basepri = same_pri_latency;
528
529 #if MONOTONIC
530 uint64_t counters[MT_CORE_NFIXED];
531 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
532 #endif /* MONOTONIC */
533 sched_perfcontrol_oncore(state, &on_core);
534 #if MONOTONIC
535 if (ctrs_enabled) {
536 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
537 }
538 #endif /* MONOTONIC */
539 }
540
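/*
 * Notify the performance controller that a thread is going off core,
 * optionally because it is terminating.
 */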
541 void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
544 {
545 if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
546 return;
547 }
548 struct going_off_core off_core;
549 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);
550
551 off_core.thread_id = old_thread->thread_id;
552 off_core.energy_estimate_nj = 0;
553 off_core.end_time = last_dispatch;
554 #if CONFIG_THREAD_GROUPS
555 struct thread_group *tg = thread_group_get(old_thread);
556 off_core.thread_group_id = thread_group_get_id(tg);
557 off_core.thread_group_data = thread_group_get_machine_data(tg);
558 #endif
559
560 #if MONOTONIC
561 uint64_t counters[MT_CORE_NFIXED];
562 bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
563 #endif /* MONOTONIC */
564 sched_perfcontrol_offcore(state, &off_core, thread_terminating);
565 #if MONOTONIC
566 if (ctrs_enabled) {
567 perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
568 }
569 #endif /* MONOTONIC */
570 }
571
572 #if CONFIG_THREAD_GROUPS
573 void
machine_thread_group_init(struct thread_group *tg)
575 {
576 if (sched_perfcontrol_thread_group_init == sched_perfcontrol_thread_group_default) {
577 return;
578 }
579 struct thread_group_data data;
580 data.thread_group_id = thread_group_get_id(tg);
581 data.thread_group_data = thread_group_get_machine_data(tg);
582 data.thread_group_size = thread_group_machine_data_size();
583 data.thread_group_flags = thread_group_get_flags(tg);
584 sched_perfcontrol_thread_group_init(&data);
585 }
586
587 void
machine_thread_group_deinit(struct thread_group *tg)
589 {
590 if (sched_perfcontrol_thread_group_deinit == sched_perfcontrol_thread_group_default) {
591 return;
592 }
593 struct thread_group_data data;
594 data.thread_group_id = thread_group_get_id(tg);
595 data.thread_group_data = thread_group_get_machine_data(tg);
596 data.thread_group_size = thread_group_machine_data_size();
597 data.thread_group_flags = thread_group_get_flags(tg);
598 sched_perfcontrol_thread_group_deinit(&data);
599 }
600
601 void
machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags)
603 {
604 if (sched_perfcontrol_thread_group_flags_update == sched_perfcontrol_thread_group_default) {
605 return;
606 }
607 struct thread_group_data data;
608 data.thread_group_id = thread_group_get_id(tg);
609 data.thread_group_data = thread_group_get_machine_data(tg);
610 data.thread_group_size = thread_group_machine_data_size();
611 data.thread_group_flags = flags;
612 sched_perfcontrol_thread_group_flags_update(&data);
613 }
614
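/*
 * Report that a thread in blocked_tg has blocked on blocking_tg and emit the
 * matching kdebug tracepoint. A NULL blocking_tg is only legal for well-known
 * cases such as the render server.
 */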
615 void
machine_thread_group_blocked(struct thread_group *blocked_tg,
    struct thread_group *blocking_tg,
    uint32_t flags,
    thread_t blocked_thread)
620 {
621 if (sched_perfcontrol_thread_group_blocked == sched_perfcontrol_thread_group_blocked_default) {
622 return;
623 }
624
625 spl_t s = splsched();
626
627 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(blocked_thread);
628 struct thread_group_data blocked_data;
629 assert(blocked_tg != NULL);
630
631 blocked_data.thread_group_id = thread_group_get_id(blocked_tg);
632 blocked_data.thread_group_data = thread_group_get_machine_data(blocked_tg);
633 blocked_data.thread_group_size = thread_group_machine_data_size();
634
635 if (blocking_tg == NULL) {
636 /*
637 * For special cases such as the render server, the blocking TG is a
638 * well known TG. Only in that case, the blocking_tg should be NULL.
639 */
640 assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
641 sched_perfcontrol_thread_group_blocked(&blocked_data, NULL, flags, state);
642 } else {
643 struct thread_group_data blocking_data;
644 blocking_data.thread_group_id = thread_group_get_id(blocking_tg);
645 blocking_data.thread_group_data = thread_group_get_machine_data(blocking_tg);
646 blocking_data.thread_group_size = thread_group_machine_data_size();
647 sched_perfcontrol_thread_group_blocked(&blocked_data, &blocking_data, flags, state);
648 }
649 KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_START,
650 thread_tid(blocked_thread), thread_group_get_id(blocked_tg),
651 blocking_tg ? thread_group_get_id(blocking_tg) : THREAD_GROUP_INVALID,
652 flags);
653
654 splx(s);
655 }
656
657 void
machine_thread_group_unblocked(struct thread_group *unblocked_tg,
    struct thread_group *unblocking_tg,
    uint32_t flags,
    thread_t unblocked_thread)
662 {
663 if (sched_perfcontrol_thread_group_unblocked == sched_perfcontrol_thread_group_unblocked_default) {
664 return;
665 }
666
667 spl_t s = splsched();
668
669 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(unblocked_thread);
670 struct thread_group_data unblocked_data;
671 assert(unblocked_tg != NULL);
672
673 unblocked_data.thread_group_id = thread_group_get_id(unblocked_tg);
674 unblocked_data.thread_group_data = thread_group_get_machine_data(unblocked_tg);
675 unblocked_data.thread_group_size = thread_group_machine_data_size();
676
677 if (unblocking_tg == NULL) {
678 /*
679 * For special cases such as the render server, the unblocking TG is a
680 * well known TG. Only in that case, the unblocking_tg should be NULL.
681 */
682 assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
683 sched_perfcontrol_thread_group_unblocked(&unblocked_data, NULL, flags, state);
684 } else {
685 struct thread_group_data unblocking_data;
686 unblocking_data.thread_group_id = thread_group_get_id(unblocking_tg);
687 unblocking_data.thread_group_data = thread_group_get_machine_data(unblocking_tg);
688 unblocking_data.thread_group_size = thread_group_machine_data_size();
689 sched_perfcontrol_thread_group_unblocked(&unblocked_data, &unblocking_data, flags, state);
690 }
691 KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_END,
692 thread_tid(unblocked_thread), thread_group_get_id(unblocked_tg),
693 unblocking_tg ? thread_group_get_id(unblocking_tg) : THREAD_GROUP_INVALID,
694 flags);
695
696 splx(s);
697 }
698
699 #endif /* CONFIG_THREAD_GROUPS */
700
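/*
 * Report the per-urgency maximum runnable latencies to the performance
 * controller.
 */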
701 void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
705 {
706 if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
707 return;
708 }
709 struct perfcontrol_max_runnable_latency latencies = {
710 .max_scheduling_latencies = {
711 [THREAD_URGENCY_NONE] = 0,
712 [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
713 [THREAD_URGENCY_NORMAL] = default_max_latency,
714 [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
715 }
716 };
717
718 sched_perfcontrol_max_runnable_latency(&latencies);
719 }
720
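/*
 * Forward a work interval notification (start/finish/deadline timestamps and
 * flags) to the performance controller, tagged with the thread's group.
 */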
721 void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
724 {
725 if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
726 return;
727 }
728 perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
729 struct perfcontrol_work_interval work_interval = {
730 .thread_id = thread->thread_id,
731 .qos_class = (uint16_t)proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
732 .urgency = kwi_args->urgency,
733 .flags = kwi_args->notify_flags,
734 .work_interval_id = kwi_args->work_interval_id,
735 .start = kwi_args->start,
736 .finish = kwi_args->finish,
737 .deadline = kwi_args->deadline,
738 .next_start = kwi_args->next_start,
739 .create_flags = kwi_args->create_flags,
740 };
741 #if CONFIG_THREAD_GROUPS
742 struct thread_group *tg;
743 tg = thread_group_get(thread);
744 work_interval.thread_group_id = thread_group_get_id(tg);
745 work_interval.thread_group_data = thread_group_get_machine_data(tg);
746 #endif
747 sched_perfcontrol_work_interval_notify(state, &work_interval);
748 }
749
750
751 void
machine_perfcontrol_deadline_passed(uint64_t deadline)
753 {
754 if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
755 sched_perfcontrol_deadline_passed(deadline);
756 }
757 }
758
759 #if SCHED_HYGIENE_DEBUG
760
761 __options_decl(int_mask_hygiene_flags_t, uint8_t, {
762 INT_MASK_BASE = 0x00,
763 INT_MASK_FROM_HANDLER = 0x01,
764 INT_MASK_IS_STACKSHOT = 0x02,
765 });
766
767 /*
768 * ml_spin_debug_reset()
769 * Reset the timestamp on a thread that has been unscheduled
770 * to avoid false alarms. Alarm will go off if interrupts are held
771 * disabled for too long, starting from now.
772 *
773 * Call ml_get_timebase() directly to prevent extra overhead on newer
774 * platforms that's enabled in DEVELOPMENT kernel configurations.
775 */
776 void
ml_spin_debug_reset(thread_t thread)
778 {
779 if (thread->machine.intmask_timestamp) {
780 thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
781 INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
782 }
783 }
784
785 /*
786 * ml_spin_debug_clear()
787 * Clear the timestamp and cycle/instruction counts on a thread that
788 * has been unscheduled to avoid false alarms
789 */
790 void
ml_spin_debug_clear(thread_t thread)
792 {
793 thread->machine.intmask_timestamp = 0;
794 thread->machine.intmask_cycles = 0;
795 thread->machine.intmask_instr = 0;
796 }
797
798 /*
799 * ml_spin_debug_clear_self()
800 * Clear the timestamp on the current thread to prevent
801 * false alarms
802 */
803 void
ml_spin_debug_clear_self(void)
805 {
806 ml_spin_debug_clear(current_thread());
807 }
808
809 #ifndef KASAN
810
811 /*
812 * Get a character representing the provided thread's kind of CPU.
813 */
814 #if !MONOTONIC
815 __unused
816 #endif // !MONOTONIC
817 static char
__ml_interrupts_disabled_cpu_kind(thread_t thread)
819 {
820 #if __AMP__
821 processor_t processor = thread->last_processor;
822 if (!processor) {
823 return '!';
824 }
825
826 switch (processor->processor_set->pset_cluster_type) {
827 case PSET_AMP_P:
828 return 'P';
829 case PSET_AMP_E:
830 return 'E';
831 default:
832 return '?';
833 }
834 #else // __AMP__
835 #pragma unused(thread)
836 return '-';
837 #endif // !__AMP__
838 }
839
840 #define EXTRA_INFO_STRING_SIZE 256
841 #define LOW_FREQ_THRESHOLD_MHZ 500
842 #define HIGH_CPI_THRESHOLD 3
843
844 static void
__ml_trigger_interrupts_disabled_handle(thread_t thread, uint64_t start, uint64_t now, uint64_t timeout, int_mask_hygiene_flags_t flags)
846 {
847 mach_timebase_info_data_t timebase;
848 clock_timebase_info(&timebase);
849 bool is_int_handler = flags & INT_MASK_FROM_HANDLER;
850 bool is_stackshot = flags & INT_MASK_IS_STACKSHOT;
851
852 const uint64_t time_elapsed = now - start;
853 const uint64_t time_elapsed_ns = (time_elapsed * timebase.numer) / timebase.denom;
854
855 uint64_t current_cycles = 0, current_instrs = 0;
856
857 #if MONOTONIC
858 if (sched_hygiene_debug_pmc) {
mt_cur_cpu_cycles_instrs_speculative(&current_cycles, &current_instrs);
860 }
861 #endif // MONOTONIC
862
863 const uint64_t cycles_elapsed = current_cycles - thread->machine.intmask_cycles;
864 const uint64_t instrs_elapsed = current_instrs - thread->machine.intmask_instr;
865
866 if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
867 const uint64_t timeout_ns = ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom;
868 char extra_info_string[EXTRA_INFO_STRING_SIZE] = { '\0' };
869 #if MONOTONIC
870 if (sched_hygiene_debug_pmc) {
871 const uint64_t time_elapsed_us = time_elapsed_ns / 1000;
872 const uint64_t average_freq_mhz = cycles_elapsed / time_elapsed_us;
873 const uint64_t average_cpi_whole = cycles_elapsed / instrs_elapsed;
874 const uint64_t average_cpi_fractional = ((cycles_elapsed * 100) / instrs_elapsed) % 100;
875 bool high_cpi = average_cpi_whole >= HIGH_CPI_THRESHOLD;
876 char core_kind = __ml_interrupts_disabled_cpu_kind(thread);
877 bool low_mhz = average_freq_mhz < LOW_FREQ_THRESHOLD_MHZ;
878
879 snprintf(extra_info_string, EXTRA_INFO_STRING_SIZE,
880 ", %sfreq = %llu MHz, %sCPI = %llu.%llu, CPU kind = %c",
881 low_mhz ? "low " : "",
882 average_freq_mhz,
883 high_cpi ? "high " : "",
884 average_cpi_whole,
885 average_cpi_fractional,
886 core_kind);
887 }
888 #endif // MONOTONIC
889
890 if (is_int_handler) {
891 panic("Processing of an interrupt (type = %u, handler address = %p, vector = %p) "
892 "took %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
893 thread->machine.int_type, (void *)thread->machine.int_handler_addr, (void *)thread->machine.int_vector,
894 time_elapsed_ns, start, now, timeout_ns, extra_info_string);
895 } else {
896 panic("%s for %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
897 is_stackshot ? "Stackshot disabled interrupts" : "Interrupts held disabled",
898 time_elapsed_ns, start, now, timeout_ns, extra_info_string);
899 }
900 } else if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_TRACE) {
901 if (is_int_handler) {
902 static const uint32_t interrupt_handled_dbgid =
903 MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_HANDLED_EXPIRED);
904 DTRACE_SCHED3(interrupt_handled_dbgid, uint64_t, time_elapsed,
905 uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
906 KDBG(interrupt_handled_dbgid, time_elapsed,
907 cycles_elapsed, instrs_elapsed);
908 } else {
909 static const uint32_t interrupt_masked_dbgid =
910 MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_MASKED_EXPIRED);
911 DTRACE_SCHED3(interrupt_masked_dbgid, uint64_t, time_elapsed,
912 uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);
913 KDBG(interrupt_masked_dbgid, time_elapsed,
914 cycles_elapsed, instrs_elapsed);
915 }
916 }
917 }
918 #endif // !defined(KASAN)
919
920 static inline void
__ml_handle_interrupts_disabled_duration(thread_t thread, uint64_t timeout, bool is_int_handler)
922 {
923 if (timeout == 0) {
924 return; // 0 means timeout disabled.
925 }
926 uint64_t start = is_int_handler ? thread->machine.inthandler_timestamp : thread->machine.intmask_timestamp;
927 if (start != 0) {
928 uint64_t now = ml_get_sched_hygiene_timebase();
929
930 if (interrupt_masked_debug_mode &&
931 ((now - start) > timeout * debug_cpu_performance_degradation_factor) &&
932 !thread->machine.inthandler_abandon) {
933 /*
934 * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
935 * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
936 */
937 #ifndef KASAN
938 __ml_trigger_interrupts_disabled_handle(thread, start, now, timeout, is_int_handler);
939 #endif
940 }
941
942 if (is_int_handler) {
943 uint64_t const duration = now - start;
944 /*
945 * No need for an atomic add, the only thread modifying
946 * this is ourselves. Other threads querying will just see
947 * either the old or the new value. (This will also just
948 * resolve to regular loads and stores on relevant
949 * platforms.)
950 */
951 uint64_t const old_duration = os_atomic_load_wide(&thread->machine.int_time_mt, relaxed);
952 os_atomic_store_wide(&thread->machine.int_time_mt, old_duration + duration, relaxed);
953 }
954 }
955 }
956
957 void
ml_handle_interrupts_disabled_duration(thread_t thread)
959 {
960 __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_BASE);
961 }
962
963 void
ml_handle_stackshot_interrupt_disabled_duration(thread_t thread)
965 {
966 /* Use MAX() to let the user bump the timeout further if needed */
967 uint64_t stackshot_timeout = os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed);
968 uint64_t normal_timeout = os_atomic_load(&interrupt_masked_timeout, relaxed);
969 uint64_t timeout = MAX(stackshot_timeout, normal_timeout);
970 __ml_handle_interrupts_disabled_duration(thread, timeout, INT_MASK_IS_STACKSHOT);
971 }
972
973 void
ml_handle_interrupt_handler_duration(thread_t thread)
975 {
976 __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), INT_MASK_FROM_HANDLER);
977 }
978
979 void
ml_irq_debug_start(uintptr_t handler, uintptr_t vector)
981 {
982 INTERRUPT_MASKED_DEBUG_START(handler, DBG_INTR_TYPE_OTHER);
983 current_thread()->machine.int_vector = (uintptr_t)VM_KERNEL_STRIP_PTR(vector);
984 }
985
986 void
ml_irq_debug_end()
988 {
989 INTERRUPT_MASKED_DEBUG_END();
990 }
991
992 /*
993 * Abandon a potential timeout when handling an interrupt. It is important to
994 * continue to keep track of the interrupt time so the time-stamp can't be
995 * reset. (Interrupt time is subtracted from preemption time to maintain
996 * accurate preemption time measurement).
997 * When `inthandler_abandon` is true, a timeout will be ignored when the
998 * interrupt handler finishes.
999 */
1000 void
ml_irq_debug_abandon(void)
1002 {
1003 assert(!ml_get_interrupts_enabled());
1004
1005 thread_t t = current_thread();
1006 if (t->machine.inthandler_timestamp != 0) {
1007 t->machine.inthandler_abandon = true;
1008 }
1009 }
1010 #endif // SCHED_HYGIENE_DEBUG
1011
1012 #if SCHED_HYGIENE_DEBUG
1013 __attribute__((noinline))
1014 static void
ml_interrupt_masked_debug_timestamp(thread_t thread)
1016 {
1017 thread->machine.intmask_timestamp = ml_get_sched_hygiene_timebase();
1018 INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
1019 }
1020 #endif
1021
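/*
 * Enable or disable IRQs via DAIF. On enable, any pending urgent AST is
 * handled first (when the preemption level is zero) and the sched-hygiene
 * checks account for how long interrupts were held masked; on disable, the
 * mask timestamp is captured. Returns whether interrupts were enabled on
 * entry.
 */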
1022 boolean_t
ml_set_interrupts_enabled_with_debug(boolean_t enable, boolean_t __unused debug)
1024 {
1025 thread_t thread;
1026 uint64_t state;
1027
1028 thread = current_thread();
1029
1030 state = __builtin_arm_rsr("DAIF");
1031
1032 if (enable && (state & DAIF_IRQF)) {
1033 assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
1034 #if SCHED_HYGIENE_DEBUG
1035 if (__probable(debug && (interrupt_masked_debug_mode || sched_preemption_disable_debug_mode))) {
1036 // Interrupts are currently masked, we will enable them (after finishing this check)
1037 if (stackshot_active()) {
1038 ml_handle_stackshot_interrupt_disabled_duration(thread);
1039 } else {
1040 ml_handle_interrupts_disabled_duration(thread);
1041 }
1042 thread->machine.intmask_timestamp = 0;
1043 thread->machine.intmask_cycles = 0;
1044 thread->machine.intmask_instr = 0;
1045 }
1046 #endif // SCHED_HYGIENE_DEBUG
1047 if (get_preemption_level() == 0) {
1048 while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
1049 #if __ARM_USER_PROTECT__
1050 uintptr_t up = arm_user_protect_begin(thread);
1051 #endif
1052 ast_taken_kernel();
1053 #if __ARM_USER_PROTECT__
1054 arm_user_protect_end(thread, up, FALSE);
1055 #endif
1056 }
1057 }
1058 __builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE);
1059 } else if (!enable && ((state & DAIF_IRQF) == 0)) {
1060 __builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE);
1061
1062 #if SCHED_HYGIENE_DEBUG
1063 if (__probable(debug && (interrupt_masked_debug_mode || sched_preemption_disable_debug_mode))) {
1064 // Interrupts were enabled, we just masked them
1065 ml_interrupt_masked_debug_timestamp(thread);
1066 }
1067 #endif
1068 }
1069 return (state & DAIF_IRQF) == 0;
1070 }
1071
1072 boolean_t
ml_set_interrupts_enabled(boolean_t enable)
1074 {
1075 return ml_set_interrupts_enabled_with_debug(enable, true);
1076 }
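/*
 * Typical save/restore usage (sketch): callers capture the previous interrupt
 * state and restore it rather than assuming it should end up enabled, e.g.
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     ... critical section ...
 *     (void) ml_set_interrupts_enabled(istate);
 */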
1077
1078 boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
1080 {
1081 return ml_set_interrupts_enabled(enable);
1082 }
1083
1084 /*
1085 * Interrupt enable function exported for AppleCLPC without
1086 * measurements enabled.
1087 *
1088 * Only for AppleCLPC!
1089 */
1090 boolean_t
sched_perfcontrol_ml_set_interrupts_without_measurement(boolean_t enable)
1092 {
1093 return ml_set_interrupts_enabled_with_debug(enable, false);
1094 }
1095
1096 /*
1097 * Routine: ml_at_interrupt_context
1098 * Function: Check if running at interrupt context
1099 */
1100 boolean_t
ml_at_interrupt_context(void)
1102 {
1103 /* Do not use a stack-based check here, as the top-level exception handler
1104 * is free to use some other stack besides the per-CPU interrupt stack.
1105 * Interrupts should always be disabled if we're at interrupt context.
1106 * Check that first, as we may be in a preemptible non-interrupt context, in
1107 * which case we could be migrated to a different CPU between obtaining
1108 * the per-cpu data pointer and loading cpu_int_state. We then might end
1109 * up checking the interrupt state of a different CPU, resulting in a false
1110 * positive. But if interrupts are disabled, we also know we cannot be
1111 * preempted. */
1112 return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
1113 }
1114
1115 /*
1116 * This answers the question
1117 * "after returning from this interrupt handler with the AST_URGENT bit set,
1118 * will I end up in ast_taken_user or ast_taken_kernel?"
1119 *
1120 * If it's called in non-interrupt context (e.g. regular syscall), it should
1121 * return false.
1122 *
1123 * Must be called with interrupts disabled.
1124 */
1125 bool
ml_did_interrupt_userspace(void)
1127 {
1128 assert(ml_get_interrupts_enabled() == false);
1129
1130 struct arm_saved_state *state = getCpuDatap()->cpu_int_state;
1131
1132 return state && PSR64_IS_USER(get_saved_state_cpsr(state));
1133 }
1134
1135
1136 vm_offset_t
ml_stack_remaining(void)
1138 {
1139 uintptr_t local = (uintptr_t) &local;
1140 vm_offset_t intstack_top_ptr;
1141
1142 /* Since this is a stack-based check, we don't need to worry about
1143 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
1144 * then the sp should never be within any CPU's interrupt stack unless
1145 * something has gone horribly wrong. */
1146 intstack_top_ptr = getCpuDatap()->intstack_top;
1147 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
1148 return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
1149 } else {
1150 return local - current_thread()->kernel_stack;
1151 }
1152 }
1153
1154 static boolean_t ml_quiescing = FALSE;
1155
1156 void
ml_set_is_quiescing(boolean_t quiescing)
1158 {
1159 ml_quiescing = quiescing;
1160 os_atomic_thread_fence(release);
1161 }
1162
1163 boolean_t
ml_is_quiescing(void)
1165 {
1166 os_atomic_thread_fence(acquire);
1167 return ml_quiescing;
1168 }
1169
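/*
 * Return how much physical memory exists beyond BootArgs->memSize, using
 * memSizeActual when the booter provides it and otherwise rounding memSize up
 * to the next 256 MB or 512 MB boundary.
 */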
1170 uint64_t
ml_get_booter_memory_size(void)
1172 {
1173 uint64_t size;
1174 uint64_t roundsize = 512 * 1024 * 1024ULL;
1175 size = BootArgs->memSizeActual;
1176 if (!size) {
1177 size = BootArgs->memSize;
1178 if (size < (2 * roundsize)) {
1179 roundsize >>= 1;
1180 }
1181 size = (size + roundsize - 1) & ~(roundsize - 1);
1182 }
1183
1184 size -= BootArgs->memSize;
1185
1186 return size;
1187 }
1188
1189 uint64_t
ml_get_abstime_offset(void)
1191 {
1192 return rtclock_base_abstime;
1193 }
1194
1195 uint64_t
ml_get_conttime_offset(void)
1197 {
1198 #if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
1199 return hwclock_conttime_offset;
1200 #elif HAS_CONTINUOUS_HWCLOCK
1201 return 0;
1202 #else
1203 return rtclock_base_abstime + mach_absolutetime_asleep;
1204 #endif
1205 }
1206
1207 uint64_t
ml_get_time_since_reset(void)
1209 {
1210 #if HAS_CONTINUOUS_HWCLOCK
1211 if (wake_conttime == UINT64_MAX) {
1212 return UINT64_MAX;
1213 } else {
1214 return mach_continuous_time() - wake_conttime;
1215 }
1216 #else
1217 /* The timebase resets across S2R, so just return the raw value. */
1218 return ml_get_hwclock();
1219 #endif
1220 }
1221
1222 void
ml_set_reset_time(__unused uint64_t wake_time)
1224 {
1225 #if HAS_CONTINUOUS_HWCLOCK
1226 wake_conttime = wake_time;
1227 #endif
1228 }
1229
1230 uint64_t
ml_get_conttime_wake_time(void)
1232 {
1233 #if HAS_CONTINUOUS_HWCLOCK
1234 /*
1235 * For now, we will reconstitute the timebase value from
1236 * cpu_timebase_init and use it as the wake time.
1237 */
1238 return wake_abstime - ml_get_abstime_offset();
#else /* HAS_CONTINUOUS_HWCLOCK */
1240 /* The wake time is simply our continuous time offset. */
1241 return ml_get_conttime_offset();
#endif /* HAS_CONTINUOUS_HWCLOCK */
1243 }
1244
1245 /*
1246 * ml_snoop_thread_is_on_core(thread_t thread)
1247 * Check if the given thread is currently on core. This function does not take
1248 * locks, disable preemption, or otherwise guarantee synchronization. The
1249 * result should be considered advisory.
1250 */
1251 bool
ml_snoop_thread_is_on_core(thread_t thread)
1253 {
1254 unsigned int cur_cpu_num = 0;
1255 const unsigned int max_cpu_id = ml_get_max_cpu_number();
1256
1257 for (cur_cpu_num = 0; cur_cpu_num <= max_cpu_id; cur_cpu_num++) {
1258 if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
1259 if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
1260 return true;
1261 }
1262 }
1263 }
1264
1265 return false;
1266 }
1267
1268 int
ml_early_cpu_max_number(void)
1270 {
1271 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1272 return ml_get_max_cpu_number();
1273 }
1274
1275 void
ml_set_max_cpus(unsigned int max_cpus __unused)
1277 {
1278 lck_mtx_lock(&max_cpus_lock);
1279 if (max_cpus_initialized != MAX_CPUS_SET) {
1280 if (max_cpus_initialized == MAX_CPUS_WAIT) {
1281 thread_wakeup((event_t) &max_cpus_initialized);
1282 }
1283 max_cpus_initialized = MAX_CPUS_SET;
1284 }
1285 lck_mtx_unlock(&max_cpus_lock);
1286 }
1287
1288 unsigned int
ml_wait_max_cpus(void)
1290 {
1291 assert(lockdown_done);
1292 lck_mtx_lock(&max_cpus_lock);
1293 while (max_cpus_initialized != MAX_CPUS_SET) {
1294 max_cpus_initialized = MAX_CPUS_WAIT;
1295 lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
1296 }
1297 lck_mtx_unlock(&max_cpus_lock);
1298 return machine_info.max_cpus;
1299 }
1300
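/*
 * Routine: ml_cpu_get_info_type
 * Function: Fill out the ml_cpu_info_t structure with cache parameters
 * associated with the given cluster type.
 */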
1301 void
ml_cpu_get_info_type(ml_cpu_info_t * ml_cpu_info, cluster_type_t cluster_type)
1303 {
1304 cache_info_t *cpuid_cache_info;
1305
1306 cpuid_cache_info = cache_info_type(cluster_type);
1307 ml_cpu_info->vector_unit = 0;
1308 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
1309 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
1310 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
1311
1312 #if (__ARM_ARCH__ >= 8)
1313 ml_cpu_info->l2_settings = 1;
1314 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
1315 #else
1316 #error Unsupported arch
1317 #endif
1318 ml_cpu_info->l3_settings = 0;
1319 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
1320 }
1321
1322 /*
1323 * Routine: ml_cpu_get_info
1324 * Function: Fill out the ml_cpu_info_t structure with parameters associated
1325 * with the boot cluster.
1326 */
1327 void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
1329 {
1330 ml_cpu_get_info_type(ml_cpu_info, ml_get_topology_info()->boot_cpu->cluster_type);
1331 }
1332
1333 unsigned int
ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available)
1335 {
1336 /*
1337 * At present no supported ARM system features SMT, so the "logical"
1338 * parameter doesn't have an impact on the result.
1339 */
1340 if (logical && available) {
1341 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
1342 } else if (logical && !available) {
1343 return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
1344 } else if (!logical && available) {
1345 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
1346 } else {
1347 return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
1348 }
1349 }
1350
1351 void
ml_get_cluster_type_name(cluster_type_t cluster_type, char *name, size_t name_size)
1353 {
1354 strlcpy(name, cluster_type_names[cluster_type], name_size);
1355 }
1356
1357 unsigned int
ml_get_cluster_number_type(cluster_type_t cluster_type)
1359 {
1360 return ml_get_topology_info()->cluster_type_num_clusters[cluster_type];
1361 }
1362
1363 unsigned int
ml_cpu_cache_sharing(unsigned int level, cluster_type_t cluster_type, bool include_all_cpu_types __unused)
1365 {
1366 unsigned int cpu_number = 0, cluster_types = 0;
1367
1368 /*
1369 * Level 0 corresponds to main memory, which is shared across all cores.
1370 */
1371 if (level == 0) {
1372 return ml_get_topology_info()->num_cpus;
1373 }
1374
1375 /*
1376 * At present no supported ARM system features more than 2 levels of caches.
1377 */
1378 if (level > 2) {
1379 return 0;
1380 }
1381
1382 /*
1383 * L1 caches are always per core.
1384 */
1385 if (level == 1) {
1386 return 1;
1387 }
1388
1389 cluster_types = (1 << cluster_type);
1390
1391 /*
1392 * Traverse clusters until we find the one(s) of the desired type(s).
1393 */
1394 for (int i = 0; i < ml_get_topology_info()->num_clusters; i++) {
1395 ml_topology_cluster_t *cluster = &ml_get_topology_info()->clusters[i];
1396 if ((1 << cluster->cluster_type) & cluster_types) {
1397 cpu_number += cluster->num_cpus;
1398 cluster_types &= ~(1 << cluster->cluster_type);
1399 if (!cluster_types) {
1400 break;
1401 }
1402 }
1403 }
1404
1405 return cpu_number;
1406 }
1407
1408 unsigned int
ml_get_cpu_types(void)
1410 {
1411 return ml_get_topology_info()->cluster_types;
1412 }
1413
1414 void
machine_conf(void)
1416 {
1417 /*
1418 * This is known to be inaccurate. mem_size should always be capped at 2 GB
1419 */
1420 machine_info.memory_size = (uint32_t)mem_size;
1421
1422 // rdar://problem/58285685: Userland expects _COMM_PAGE_LOGICAL_CPUS to report
1423 // (max_cpu_id+1) rather than a literal *count* of logical CPUs.
1424 unsigned int num_cpus = ml_get_topology_info()->max_cpu_id + 1;
1425 machine_info.max_cpus = num_cpus;
1426 machine_info.physical_cpu_max = num_cpus;
1427 machine_info.logical_cpu_max = num_cpus;
1428 }
1429
1430 void
machine_init(void)
1432 {
1433 debug_log_init();
1434 clock_config();
1435 is_clock_configured = TRUE;
1436 if (debug_enabled) {
1437 pmap_map_globals();
1438 }
1439 ml_lockdown_init();
1440 }
1441