/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <kern/sched_hygiene.h>
#include <kern/startup.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>
#include <mach/machine/sdt.h>

#if !HAS_CONTINUOUS_HWCLOCK
extern uint64_t mach_absolutetime_asleep;
#else
extern uint64_t wake_abstime;
static uint64_t wake_conttime = UINT64_MAX;
#endif

extern volatile uint32_t debug_enabled;
extern _Atomic unsigned int cluster_type_num_active_cpus[MAX_CPU_TYPES];

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
uint32_t lockdown_done = 0;
boolean_t is_clock_configured = FALSE;

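/*
 * Default no-op implementations of the performance controller (CLPC)
 * callouts. These stay installed until a performance controller registers
 * its own handlers via sched_perfcontrol_register_callbacks(), and are
 * restored whenever the callbacks are reset.
 */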
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}

static void
sched_perfcontrol_thread_group_blocked_default(
    __unused thread_group_data_t blocked_tg, __unused thread_group_data_t blocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t blocked_thr_state)
{
}

static void
sched_perfcontrol_thread_group_unblocked_default(
    __unused thread_group_data_t unblocked_tg, __unused thread_group_data_t unblocking_tg,
    __unused uint32_t flags, __unused perfcontrol_state_t unblocked_thr_state)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
sched_perfcontrol_thread_group_blocked_t sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
sched_perfcontrol_thread_group_unblocked_t sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
boolean_t sched_perfcontrol_thread_shared_rsrc_flags_enabled = false;

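/*
 * Routine: sched_perfcontrol_register_callbacks
 * Function: Install the performance controller callout table, or reset it to
 * the no-op defaults when callbacks is NULL. Each group of callouts is only
 * honored if the caller's structure version is recent enough; entries left
 * NULL fall back to the defaults above.
 */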
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
    assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

    if (size_of_state > sizeof(struct perfcontrol_state)) {
        panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
    }

    if (callbacks) {
#if CONFIG_THREAD_GROUPS
        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_3) {
            if (callbacks->thread_group_init != NULL) {
                sched_perfcontrol_thread_group_init = callbacks->thread_group_init;
            } else {
                sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
            }
            if (callbacks->thread_group_deinit != NULL) {
                sched_perfcontrol_thread_group_deinit = callbacks->thread_group_deinit;
            } else {
                sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
            }
            // tell CLPC about existing thread groups
            thread_group_resync(TRUE);
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_6) {
            if (callbacks->thread_group_flags_update != NULL) {
                sched_perfcontrol_thread_group_flags_update = callbacks->thread_group_flags_update;
            } else {
                sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_8) {
            if (callbacks->thread_group_blocked != NULL) {
                sched_perfcontrol_thread_group_blocked = callbacks->thread_group_blocked;
            } else {
                sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
            }

            if (callbacks->thread_group_unblocked != NULL) {
                sched_perfcontrol_thread_group_unblocked = callbacks->thread_group_unblocked;
            } else {
                sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
            }
        }
#endif
        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_9) {
            sched_perfcontrol_thread_shared_rsrc_flags_enabled = true;
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
            if (callbacks->work_interval_ctl != NULL) {
                sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
            } else {
                sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
            if (callbacks->csw != NULL) {
                sched_perfcontrol_csw = callbacks->csw;
            } else {
                sched_perfcontrol_csw = sched_perfcontrol_csw_default;
            }

            if (callbacks->state_update != NULL) {
                sched_perfcontrol_state_update = callbacks->state_update;
            } else {
                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
            if (callbacks->deadline_passed != NULL) {
                sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
            } else {
                sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
            }
        }

        if (callbacks->offcore != NULL) {
            sched_perfcontrol_offcore = callbacks->offcore;
        } else {
            sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        }

        if (callbacks->context_switch != NULL) {
            sched_perfcontrol_switch = callbacks->context_switch;
        } else {
            sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        }

        if (callbacks->oncore != NULL) {
            sched_perfcontrol_oncore = callbacks->oncore;
        } else {
            sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        }

        if (callbacks->max_runnable_latency != NULL) {
            sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
        } else {
            sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        }

        if (callbacks->work_interval_notify != NULL) {
            sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
        } else {
            sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        }
    } else {
        /* reset to defaults */
#if CONFIG_THREAD_GROUPS
        thread_group_resync(FALSE);
#endif
        sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
        sched_perfcontrol_csw = sched_perfcontrol_csw_default;
        sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
        sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default;
        sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default;
    }
}

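/*
 * Fill out the perfcontrol_thread_data structure passed to the performance
 * controller on the context-switch and state-update callouts.
 */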
static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
    bzero(data, sizeof(struct perfcontrol_thread_data));
    data->perfctl_class = thread_get_perfcontrol_class(thread);
    data->energy_estimate_nj = 0;
    data->thread_id = thread->thread_id;
#if CONFIG_THREAD_GROUPS
    struct thread_group *tg = thread_group_get(thread);
    data->thread_group_id = thread_group_get_id(tg);
    data->thread_group_data = thread_group_get_machine_data(tg);
#endif
    data->scheduling_latency_at_same_basepri = same_pri_latency;
    data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
    mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
    cpu_counters->instructions = 0;
    cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
    if (!perfcontrol_callout_stats_enabled) {
        return false;
    }
    mt_fixed_counts(counters);
    return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
    uint64_t end_counters[MT_CORE_NFIXED];
    mt_fixed_counts(end_counters);
    os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
        end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
    os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
        end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
    os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
    if (!perfcontrol_callout_stats_enabled) {
        return 0;
    }
    return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
           os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}


#if CONFIG_SCHED_EDGE

/*
 * The Edge scheduler allows the performance controller to update properties about the
 * threads as part of the callouts. These properties typically include shared cluster
 * resource usage. This allows the scheduler to manage specific threads within the
 * workload more optimally.
 */
static void
sched_perfcontrol_thread_flags_update(thread_t thread,
    struct perfcontrol_thread_data *thread_data,
    shared_rsrc_policy_agent_t agent)
{
    kern_return_t kr = KERN_SUCCESS;
    if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
        if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_RR) {
            kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
        } else {
            kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, agent);
        }
    }
    if (thread_data->thread_flags_mask & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
        if (thread_data->thread_flags & PERFCTL_THREAD_FLAGS_MASK_CLUSTER_SHARED_RSRC_NATIVE_FIRST) {
            kr = thread_shared_rsrc_policy_set(thread, 0, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
        } else {
            kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST, agent);
        }
    }
    /*
     * The thread_shared_rsrc_policy_* routines only fail if the performance controller is
     * attempting to double set/clear a policy on the thread.
     */
    assert(kr == KERN_SUCCESS);
}

#endif /* CONFIG_SCHED_EDGE */

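/*
 * Routine: machine_switch_perfcontrol_context
 * Function: Invoked on context switch. Issues the legacy context-switch
 * callout and, if registered, the richer csw callout with per-thread data
 * and CPU counters, then folds any reported energy estimates back into the
 * two threads.
 */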
void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
    if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
        perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
        perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
        sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
    }

    if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
        uint32_t cpu_id = (uint32_t)cpu_number();
        struct perfcontrol_cpu_counters cpu_counters;
        struct perfcontrol_thread_data offcore, oncore;
        machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
        machine_switch_populate_perfcontrol_thread_data(&oncore, new,
            new_thread_same_pri_latency);
        machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
        uint64_t counters[MT_CORE_NFIXED];
        bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
        sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
            &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
        if (ctrs_enabled) {
            perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
        }
#endif /* MONOTONIC */

#if __arm64__
        old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
        new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif

#if CONFIG_SCHED_EDGE
        if (sched_perfcontrol_thread_shared_rsrc_flags_enabled) {
            sched_perfcontrol_thread_flags_update(old, &offcore, SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW);
        }
#endif /* CONFIG_SCHED_EDGE */
    }
}

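/*
 * Routine: machine_switch_perfcontrol_state_update
 * Function: Notify the performance controller of a per-thread event such as
 * quantum expiry, and fold the reported energy estimate back into the thread.
 */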
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
    if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
        return;
    }
    uint32_t cpu_id = (uint32_t)cpu_number();
    struct perfcontrol_thread_data data;
    machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
        &data, NULL);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
    }
#endif /* MONOTONIC */

#if __arm64__
    thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif

#if CONFIG_SCHED_EDGE
    if (sched_perfcontrol_thread_shared_rsrc_flags_enabled && (event == QUANTUM_EXPIRY)) {
        sched_perfcontrol_thread_flags_update(thread, &data, SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM);
    } else {
        assert(data.thread_flags_mask == 0);
    }
#endif /* CONFIG_SCHED_EDGE */
}

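/*
 * Routine: machine_thread_going_on_core
 * Function: Issue the on-core callout for a thread that is about to run,
 * describing its QoS class, urgency, scheduling latency and thread group.
 */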
void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
    if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
        return;
    }
    struct going_on_core on_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

    on_core.thread_id = new_thread->thread_id;
    on_core.energy_estimate_nj = 0;
    on_core.qos_class = (uint16_t)proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
    on_core.urgency = (uint16_t)urgency;
    on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
    on_core.is_kernel_thread = get_threadtask(new_thread) == kernel_task;
#if CONFIG_THREAD_GROUPS
    struct thread_group *tg = thread_group_get(new_thread);
    on_core.thread_group_id = thread_group_get_id(tg);
    on_core.thread_group_data = thread_group_get_machine_data(tg);
#endif
    on_core.scheduling_latency = sched_latency;
    on_core.start_time = timestamp;
    on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

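/*
 * Routine: machine_thread_going_off_core
 * Function: Issue the off-core callout for a thread leaving the CPU,
 * including its last dispatch time and whether it is terminating.
 */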
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
    if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
        return;
    }
    struct going_off_core off_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

    off_core.thread_id = old_thread->thread_id;
    off_core.energy_estimate_nj = 0;
    off_core.end_time = last_dispatch;
#if CONFIG_THREAD_GROUPS
    struct thread_group *tg = thread_group_get(old_thread);
    off_core.thread_group_id = thread_group_get_id(tg);
    off_core.thread_group_data = thread_group_get_machine_data(tg);
#endif

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}

#if CONFIG_THREAD_GROUPS
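/*
 * The machine_thread_group_* wrappers below translate kernel thread_group
 * objects into the thread_group_data structures understood by the
 * performance controller before invoking the corresponding callouts.
 */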
void
machine_thread_group_init(struct thread_group *tg)
{
    if (sched_perfcontrol_thread_group_init == sched_perfcontrol_thread_group_default) {
        return;
    }
    struct thread_group_data data;
    data.thread_group_id = thread_group_get_id(tg);
    data.thread_group_data = thread_group_get_machine_data(tg);
    data.thread_group_size = thread_group_machine_data_size();
    data.thread_group_flags = thread_group_get_flags(tg);
    sched_perfcontrol_thread_group_init(&data);
}

void
machine_thread_group_deinit(struct thread_group *tg)
{
    if (sched_perfcontrol_thread_group_deinit == sched_perfcontrol_thread_group_default) {
        return;
    }
    struct thread_group_data data;
    data.thread_group_id = thread_group_get_id(tg);
    data.thread_group_data = thread_group_get_machine_data(tg);
    data.thread_group_size = thread_group_machine_data_size();
    data.thread_group_flags = thread_group_get_flags(tg);
    sched_perfcontrol_thread_group_deinit(&data);
}

void
machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags)
{
    if (sched_perfcontrol_thread_group_flags_update == sched_perfcontrol_thread_group_default) {
        return;
    }
    struct thread_group_data data;
    data.thread_group_id = thread_group_get_id(tg);
    data.thread_group_data = thread_group_get_machine_data(tg);
    data.thread_group_size = thread_group_machine_data_size();
    data.thread_group_flags = flags;
    sched_perfcontrol_thread_group_flags_update(&data);
}

void
machine_thread_group_blocked(struct thread_group *blocked_tg,
    struct thread_group *blocking_tg,
    uint32_t flags,
    thread_t blocked_thread)
{
    if (sched_perfcontrol_thread_group_blocked == sched_perfcontrol_thread_group_blocked_default) {
        return;
    }

    spl_t s = splsched();

    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(blocked_thread);
    struct thread_group_data blocked_data;
    assert(blocked_tg != NULL);

    blocked_data.thread_group_id = thread_group_get_id(blocked_tg);
    blocked_data.thread_group_data = thread_group_get_machine_data(blocked_tg);
    blocked_data.thread_group_size = thread_group_machine_data_size();

    if (blocking_tg == NULL) {
        /*
         * For special cases such as the render server, the blocking TG is a
         * well known TG. Only in that case, the blocking_tg should be NULL.
         */
        assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
        sched_perfcontrol_thread_group_blocked(&blocked_data, NULL, flags, state);
    } else {
        struct thread_group_data blocking_data;
        blocking_data.thread_group_id = thread_group_get_id(blocking_tg);
        blocking_data.thread_group_data = thread_group_get_machine_data(blocking_tg);
        blocking_data.thread_group_size = thread_group_machine_data_size();
        sched_perfcontrol_thread_group_blocked(&blocked_data, &blocking_data, flags, state);
    }
    KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_START,
        thread_tid(blocked_thread), thread_group_get_id(blocked_tg),
        blocking_tg ? thread_group_get_id(blocking_tg) : THREAD_GROUP_INVALID,
        flags);

    splx(s);
}

void
machine_thread_group_unblocked(struct thread_group *unblocked_tg,
    struct thread_group *unblocking_tg,
    uint32_t flags,
    thread_t unblocked_thread)
{
    if (sched_perfcontrol_thread_group_unblocked == sched_perfcontrol_thread_group_unblocked_default) {
        return;
    }

    spl_t s = splsched();

    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(unblocked_thread);
    struct thread_group_data unblocked_data;
    assert(unblocked_tg != NULL);

    unblocked_data.thread_group_id = thread_group_get_id(unblocked_tg);
    unblocked_data.thread_group_data = thread_group_get_machine_data(unblocked_tg);
    unblocked_data.thread_group_size = thread_group_machine_data_size();

    if (unblocking_tg == NULL) {
        /*
         * For special cases such as the render server, the unblocking TG is a
         * well known TG. Only in that case, the unblocking_tg should be NULL.
         */
        assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER);
        sched_perfcontrol_thread_group_unblocked(&unblocked_data, NULL, flags, state);
    } else {
        struct thread_group_data unblocking_data;
        unblocking_data.thread_group_id = thread_group_get_id(unblocking_tg);
        unblocking_data.thread_group_data = thread_group_get_machine_data(unblocking_tg);
        unblocking_data.thread_group_size = thread_group_machine_data_size();
        sched_perfcontrol_thread_group_unblocked(&unblocked_data, &unblocking_data, flags, state);
    }
    KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_END,
        thread_tid(unblocked_thread), thread_group_get_id(unblocked_tg),
        unblocking_tg ? thread_group_get_id(unblocking_tg) : THREAD_GROUP_INVALID,
        flags);

    splx(s);
}

#endif /* CONFIG_THREAD_GROUPS */

void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
    if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
        return;
    }
    struct perfcontrol_max_runnable_latency latencies = {
        .max_scheduling_latencies = {
            [THREAD_URGENCY_NONE] = 0,
            [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
            [THREAD_URGENCY_NORMAL] = default_max_latency,
            [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
        }
    };

    sched_perfcontrol_max_runnable_latency(&latencies);
}

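/*
 * Routine: machine_work_interval_notify
 * Function: Forward a work interval update (start/finish/deadline and flags)
 * for the given thread to the performance controller.
 */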
void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
    if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
        return;
    }
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
    struct perfcontrol_work_interval work_interval = {
        .thread_id = thread->thread_id,
        .qos_class = (uint16_t)proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
        .urgency = kwi_args->urgency,
        .flags = kwi_args->notify_flags,
        .work_interval_id = kwi_args->work_interval_id,
        .start = kwi_args->start,
        .finish = kwi_args->finish,
        .deadline = kwi_args->deadline,
        .next_start = kwi_args->next_start,
        .create_flags = kwi_args->create_flags,
    };
#if CONFIG_THREAD_GROUPS
    struct thread_group *tg;
    tg = thread_group_get(thread);
    work_interval.thread_group_id = thread_group_get_id(tg);
    work_interval.thread_group_data = thread_group_get_machine_data(tg);
#endif
    sched_perfcontrol_work_interval_notify(state, &work_interval);
}


void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
    if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
        sched_perfcontrol_deadline_passed(deadline);
    }
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. Alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 *
 * Call ml_get_timebase() directly to prevent extra overhead on newer
 * platforms that's enabled in DEVELOPMENT kernel configurations.
 */
void
ml_spin_debug_reset(thread_t thread)
{
    if (thread->machine.intmask_timestamp) {
        thread->machine.intmask_timestamp = ml_get_speculative_timebase();
        INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
    }
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp and cycle/instruction counts on a thread that
 * has been unscheduled to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
    thread->machine.intmask_timestamp = 0;
    thread->machine.intmask_cycles = 0;
    thread->machine.intmask_instr = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self()
{
    ml_spin_debug_clear(current_thread());
}

#ifndef KASAN
#define PMC_DATA_STRING_SIZE 100

static uint32_t const interrupt_masked_dbgid = MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_MASKED_EXPIRED) | DBG_FUNC_NONE;
static uint32_t const interrupt_handled_dbgid = MACHDBG_CODE(DBG_MACH_SCHED, MACH_INT_HANDLED_EXPIRED) | DBG_FUNC_NONE;

static void
__ml_trigger_interrupts_disabled_handle(thread_t thread, uint64_t start, uint64_t now, uint64_t timeout, bool is_int_handler)
{
    mach_timebase_info_data_t timebase;
    clock_timebase_info(&timebase);

    const uint64_t time_elapsed = now - start;
    const uint64_t time_elapsed_ns = ((time_elapsed) * timebase.numer) / timebase.denom;

    uint64_t current_cycles = 0, current_instrs = 0;

#if MONOTONIC
    if (interrupt_masked_debug_pmc) {
        mt_cur_cpu_cycles_instrs_speculative(&current_cycles, &current_instrs);
    }
#endif

    if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
        const uint64_t timeout_ns = ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom;
        char pmc_data_string[PMC_DATA_STRING_SIZE] = { '\0' };
#if MONOTONIC
        if (interrupt_masked_debug_pmc) {
            uint64_t const average_freq = (current_cycles - thread->machine.intmask_cycles) / (time_elapsed_ns / 1000);
            uint64_t const average_cpi_whole = (current_cycles - thread->machine.intmask_cycles) / (current_instrs - thread->machine.intmask_instr);
            uint64_t const average_cpi_fractional =
                (((current_cycles - thread->machine.intmask_cycles) * 100) / (current_instrs - thread->machine.intmask_instr)) % 100;

            snprintf(pmc_data_string, PMC_DATA_STRING_SIZE, ", freq = %llu MHz, CPI = %llu.%llu", average_freq, average_cpi_whole, average_cpi_fractional);
        }
#endif

        if (is_int_handler) {
            panic("Processing of an interrupt (type = %u, handler address = %p, vector = %p) took %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
                thread->machine.int_type, (void *)thread->machine.int_handler_addr, (void *)thread->machine.int_vector,
                time_elapsed_ns, start, now, timeout_ns, pmc_data_string);
        } else {
            panic("Interrupts held disabled for %llu nanoseconds (start = %llu, now = %llu, timeout = %llu ns%s)",
                time_elapsed_ns, start, now, timeout_ns, pmc_data_string);
        }
    } else if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_TRACE) {
        uint64_t const cycles_elapsed = current_cycles - thread->machine.intmask_cycles;
        uint64_t const instrs_elapsed = current_instrs - thread->machine.intmask_instr;

        if (is_int_handler) {
            DTRACE_SCHED3(interrupt_handled_dbgid, uint64_t, time_elapsed,
                uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);

            if (__improbable(kdebug_debugid_enabled(interrupt_handled_dbgid))) {
                KDBG(interrupt_handled_dbgid, time_elapsed,
                    cycles_elapsed, instrs_elapsed);
            }
        } else {
            DTRACE_SCHED3(interrupt_masked_dbgid, uint64_t, time_elapsed,
                uint64_t, cycles_elapsed, uint64_t, instrs_elapsed);

            if (__improbable(kdebug_debugid_enabled(interrupt_masked_dbgid))) {
                KDBG(interrupt_masked_dbgid, time_elapsed,
                    cycles_elapsed, instrs_elapsed);
            }
        }
    }
}
#endif

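/*
 * Common helper for the sched hygiene interrupt-masked checks: computes how
 * long interrupts (or an interrupt handler) have been running, invokes the
 * panic/trace handler if the timeout was exceeded, and accumulates the
 * per-thread interrupt handler time.
 */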
static inline void
__ml_handle_interrupts_disabled_duration(thread_t thread, uint64_t timeout, bool is_int_handler)
{
    if (timeout == 0) {
        return; // 0 means timeout disabled.
    }
    uint64_t start, now;

    start = is_int_handler ? thread->machine.inthandler_timestamp : thread->machine.intmask_timestamp;
    if (start != 0) {
        now = ml_get_speculative_timebase();

        if ((now - start) > timeout * debug_cpu_performance_degradation_factor) {
            /*
             * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
             * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
             */
#ifndef KASAN
            __ml_trigger_interrupts_disabled_handle(thread, start, now, timeout, is_int_handler);
#endif
        }

        if (is_int_handler) {
            uint64_t const duration = now - start;
#if SCHED_PREEMPTION_DISABLE_DEBUG
            ml_adjust_preemption_disable_time(thread, duration);
#endif /* SCHED_PREEMPTION_DISABLE_DEBUG */
            /*
             * No need for an atomic add, the only thread modifying
             * this is ourselves. Other threads querying will just see
             * either the old or the new value. (This will also just
             * resolve to regular loads and stores on relevant
             * platforms.)
             */
            uint64_t const old_duration = os_atomic_load_wide(&thread->machine.int_time_mt, relaxed);
            os_atomic_store_wide(&thread->machine.int_time_mt, old_duration + duration, relaxed);
        }
    }

    return;
}

void
ml_handle_interrupts_disabled_duration(thread_t thread)
{
    __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), false);
}

void
ml_handle_stackshot_interrupt_disabled_duration(thread_t thread)
{
    /* Use MAX() to let the user bump the timeout further if needed */
    __ml_handle_interrupts_disabled_duration(thread, MAX(os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed), os_atomic_load(&interrupt_masked_timeout, relaxed)), false);
}

void
ml_handle_interrupt_handler_duration(thread_t thread)
{
    __ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed), true);
}

#if SCHED_PREEMPTION_DISABLE_DEBUG
void
ml_adjust_preemption_disable_time(thread_t thread, int64_t duration)
{
    /* We don't want to count interrupt handler duration in preemption disable time. */
    if (thread->machine.preemption_disable_adj_mt != 0) {
        /* We don't care *when* preemption was disabled, just for how
         * long. So to exclude interrupt handling intervals, we
         * adjust the start time forward. */
        thread->machine.preemption_disable_adj_mt += duration;
    }
}
#endif /* SCHED_PREEMPTION_DISABLE_DEBUG */

void
ml_irq_debug_start(uintptr_t handler, uintptr_t vector)
{
    INTERRUPT_MASKED_DEBUG_START(handler, DBG_INTR_TYPE_OTHER);
    current_thread()->machine.int_vector = (uintptr_t)VM_KERNEL_STRIP_PTR(vector);
}

void
ml_irq_debug_end()
{
    INTERRUPT_MASKED_DEBUG_END();
}
#endif // INTERRUPT_MASKED_DEBUG

#if INTERRUPT_MASKED_DEBUG
__attribute__((noinline))
static void
ml_interrupt_masked_debug_timestamp(thread_t thread)
{
    thread->machine.intmask_timestamp = ml_get_speculative_timebase();
    INTERRUPT_MASKED_DEBUG_CAPTURE_PMC(thread);
}
#endif

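/*
 * Routine: ml_set_interrupts_enabled
 * Function: Enable or disable IRQ/FIQ delivery on the current CPU and return
 * whether interrupts were previously enabled. When enabling, this also runs
 * the interrupt-masked duration check (if the debug mode is active) and
 * handles any pending urgent AST; when disabling, it records the timestamp
 * used by those checks.
 */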
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
    thread_t thread;
    uint64_t state;

    thread = current_thread();

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
    state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
    state = __builtin_arm_rsr("DAIF");
#endif
    if (enable && (state & INTERRUPT_MASK)) {
        assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug_mode) {
            // Interrupts are currently masked, we will enable them (after finishing this check)
            if (stackshot_active()) {
                ml_handle_stackshot_interrupt_disabled_duration(thread);
            } else {
                ml_handle_interrupts_disabled_duration(thread);
            }
            thread->machine.intmask_timestamp = 0;
            thread->machine.intmask_cycles = 0;
            thread->machine.intmask_instr = 0;
        }
#endif // INTERRUPT_MASKED_DEBUG
        if (get_preemption_level() == 0) {
            while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
                uintptr_t up = arm_user_protect_begin(thread);
#endif
                ast_taken_kernel();
#if __ARM_USER_PROTECT__
                arm_user_protect_end(thread, up, FALSE);
#endif
            }
        }
#if __arm__
        __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
        __builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE);
#endif
    } else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
        __builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE);
#endif
#if INTERRUPT_MASKED_DEBUG
        if (__improbable(interrupt_masked_debug_mode)) {
            // Interrupts were enabled, we just masked them
            ml_interrupt_masked_debug_timestamp(thread);
        }
#endif
    }
    return (state & INTERRUPT_MASK) == 0;
}

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    /* Do not use a stack-based check here, as the top-level exception handler
     * is free to use some other stack besides the per-CPU interrupt stack.
     * Interrupts should always be disabled if we're at interrupt context.
     * Check that first, as we may be in a preemptible non-interrupt context, in
     * which case we could be migrated to a different CPU between obtaining
     * the per-cpu data pointer and loading cpu_int_state. We then might end
     * up checking the interrupt state of a different CPU, resulting in a false
     * positive. But if interrupts are disabled, we also know we cannot be
     * preempted. */
    return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;
    vm_offset_t intstack_top_ptr;

    /* Since this is a stack-based check, we don't need to worry about
     * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
     * then the sp should never be within any CPU's interrupt stack unless
     * something has gone horribly wrong. */
    intstack_top_ptr = getCpuDatap()->intstack_top;
    if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
        return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
    } else {
        return local - current_thread()->kernel_stack;
    }
}

static boolean_t ml_quiescing = FALSE;

void
ml_set_is_quiescing(boolean_t quiescing)
{
    ml_quiescing = quiescing;
    os_atomic_thread_fence(release);
}

boolean_t
ml_is_quiescing(void)
{
    os_atomic_thread_fence(acquire);
    return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
    uint64_t size;
    uint64_t roundsize = 512 * 1024 * 1024ULL;
    size = BootArgs->memSizeActual;
    if (!size) {
        size = BootArgs->memSize;
        if (size < (2 * roundsize)) {
            roundsize >>= 1;
        }
        size = (size + roundsize - 1) & ~(roundsize - 1);
    }

    size -= BootArgs->memSize;

    return size;
}

uint64_t
ml_get_abstime_offset(void)
{
    return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
    return hwclock_conttime_offset;
#elif HAS_CONTINUOUS_HWCLOCK
    return 0;
#else
    return rtclock_base_abstime + mach_absolutetime_asleep;
#endif
}

uint64_t
ml_get_time_since_reset(void)
{
#if HAS_CONTINUOUS_HWCLOCK
    if (wake_conttime == UINT64_MAX) {
        return UINT64_MAX;
    } else {
        return mach_continuous_time() - wake_conttime;
    }
#else
    /* The timebase resets across S2R, so just return the raw value. */
    return ml_get_hwclock();
#endif
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
#if HAS_CONTINUOUS_HWCLOCK
    wake_conttime = wake_time;
#endif
}

uint64_t
ml_get_conttime_wake_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
    /*
     * For now, we will reconstitute the timebase value from
     * cpu_timebase_init and use it as the wake time.
     */
    return wake_abstime - ml_get_abstime_offset();
#else /* HAS_CONTINUOUS_HWCLOCK */
    /* The wake time is simply our continuous time offset. */
    return ml_get_conttime_offset();
#endif /* HAS_CONTINUOUS_HWCLOCK */
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
    unsigned int cur_cpu_num = 0;
    const unsigned int max_cpu_id = ml_get_max_cpu_number();

    for (cur_cpu_num = 0; cur_cpu_num <= max_cpu_id; cur_cpu_num++) {
        if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
            if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
                return true;
            }
        }
    }

    return false;
}

int
ml_early_cpu_max_number(void)
{
    assert(startup_phase >= STARTUP_SUB_TUNABLES);
    return ml_get_max_cpu_number();
}

void
ml_set_max_cpus(unsigned int max_cpus __unused)
{
    lck_mtx_lock(&max_cpus_lock);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        if (max_cpus_initialized == MAX_CPUS_WAIT) {
            thread_wakeup((event_t) &max_cpus_initialized);
        }
        max_cpus_initialized = MAX_CPUS_SET;
    }
    lck_mtx_unlock(&max_cpus_lock);
}

unsigned int
ml_wait_max_cpus(void)
{
    assert(lockdown_done);
    lck_mtx_lock(&max_cpus_lock);
    while (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
    }
    lck_mtx_unlock(&max_cpus_lock);
    return machine_info.max_cpus;
}

void
ml_cpu_get_info_type(ml_cpu_info_t * ml_cpu_info, cluster_type_t cluster_type)
{
    cache_info_t *cpuid_cache_info;

    cpuid_cache_info = cache_info_type(cluster_type);
    ml_cpu_info->vector_unit = 0;
    ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
    ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
    ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
    ml_cpu_info->l2_settings = 1;
    ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
    ml_cpu_info->l2_settings = 0;
    ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

/*
 * Routine: ml_cpu_get_info
 * Function: Fill out the ml_cpu_info_t structure with parameters associated
 * with the boot cluster.
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    ml_cpu_get_info_type(ml_cpu_info, ml_get_topology_info()->boot_cpu->cluster_type);
}

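/*
 * Routine: ml_get_cpu_number_type
 * Function: Return the number of CPUs of the given cluster type, either the
 * total from the topology or only the currently active ones. The logical /
 * physical distinction is moot because no supported ARM system features SMT.
 */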
unsigned int
ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available)
{
    /*
     * At present no supported ARM system features SMT, so the "logical"
     * parameter doesn't have an impact on the result.
     */
    if (logical && available) {
        return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
    } else if (logical && !available) {
        return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
    } else if (!logical && available) {
        return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);
    } else {
        return ml_get_topology_info()->cluster_type_num_cpus[cluster_type];
    }
}

unsigned int
ml_cpu_cache_sharing(unsigned int level, cluster_type_t cluster_type, bool include_all_cpu_types __unused)
{
    unsigned int cpu_number = 0, cluster_types = 0;

    /*
     * Level 0 corresponds to main memory, which is shared across all cores.
     */
    if (level == 0) {
        return ml_get_topology_info()->num_cpus;
    }

    /*
     * At present no supported ARM system features more than 2 levels of caches.
     */
    if (level > 2) {
        return 0;
    }

    /*
     * L1 caches are always per core.
     */
    if (level == 1) {
        return 1;
    }

    cluster_types = (1 << cluster_type);

    /*
     * Traverse clusters until we find the one(s) of the desired type(s).
     */
    for (int i = 0; i < ml_get_topology_info()->num_clusters; i++) {
        ml_topology_cluster_t *cluster = &ml_get_topology_info()->clusters[i];
        if ((1 << cluster->cluster_type) & cluster_types) {
            cpu_number += cluster->num_cpus;
            cluster_types &= ~(1 << cluster->cluster_type);
            if (!cluster_types) {
                break;
            }
        }
    }

    return cpu_number;
}

unsigned int
ml_get_cpu_types(void)
{
    return ml_get_topology_info()->cluster_types;
}

void
machine_conf(void)
{
    /*
     * This is known to be inaccurate. mem_size should always be capped at 2 GB
     */
    machine_info.memory_size = (uint32_t)mem_size;

    // rdar://problem/58285685: Userland expects _COMM_PAGE_LOGICAL_CPUS to report
    // (max_cpu_id+1) rather than a literal *count* of logical CPUs.
    unsigned int num_cpus = ml_get_topology_info()->max_cpu_id + 1;
    machine_info.max_cpus = num_cpus;
    machine_info.physical_cpu_max = num_cpus;
    machine_info.logical_cpu_max = num_cpus;
}

void
machine_init(void)
{
    debug_log_init();
    clock_config();
    is_clock_configured = TRUE;
    if (debug_enabled) {
        pmap_map_globals();
    }
    ml_lockdown_init();
}