/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */

#include <libkern/section_keywords.h>

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif

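/*
 * Entry points for the CPU start/resume paths; start_cpu_paddr caches the
 * physical address of start_cpu for installation as a CPU reset handler
 * (see cpu_sleep() and cpu_start() below).
 */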
extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;
vm_address_t start_cpu_paddr;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if APPLEVIRTUALPLATFORM
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep, unsigned int cpu, uint64_t entry_pa);
#else
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
#endif
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

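/*
 * Saved system register state consumed by the low-level reset/resume path;
 * only TCR_EL1 is tracked here, seeded with its boot-time value.
 */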
sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};

// wfi - wfi mode
// 0 : disabled
// 1 : normal
// 2 : overhead simulation (delay & flags)
TUNABLE(unsigned int, wfi, "wfi", 1);
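/*
 * Illustrative decoding (DEVELOPMENT || DEBUG kernels only): a boot-arg of
 * wfi=0x00640202 selects mode 2 (overhead simulation) with wfi_flags 0x02
 * (flush TLBs) and a delay field of 0x64; see the decode in
 * cpu_machine_idle_init().
 */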
#if DEVELOPMENT || DEBUG

// wfi_flags
// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

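/*
 * kdebug event codes bracketing the WFE polling loops in cpu_idle()
 * (emitted with DBG_FUNC_START/DBG_FUNC_END).
 */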
#define CPUPM_IDLE_WFE 0x5310300
#define CPUPM_IDLE_TIMER_WFE 0x5310304

/* When recommended, issue WFE with [FI]IRQ unmasked in the idle
 * loop. The default.
 */
uint32_t idle_proximate_io_wfe_unmasked = 1;
#if DEVELOPMENT || DEBUG
uint32_t idle_proximate_timer_wfe = 1;
uint32_t idle_proximate_io_wfe_masked = 0;
#else
/* Issue WFE in lieu of WFI when awaiting a proximate timer. */
static uint32_t idle_proximate_timer_wfe = 1;
/* When recommended, issue WFE with [FI]IRQ masked in the idle loop.
 * Non-default, retained for experimentation.
 */
static uint32_t idle_proximate_io_wfe_masked = 0;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

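/*
 * IPI test scratch area: slots [0, MAX_CPUS) hold ordinary-IPI responses,
 * slots [MAX_CPUS, 2 * MAX_CPUS) hold immediate-IPI responses.
 */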
uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

MACHINE_TIMEOUT(arm64_ipi_test_timeout, "arm64-ipi-test", 100, MACHINE_TIMEOUT_UNIT_MSEC, NULL);

void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint64_t timeout_ms = os_atomic_load(&arm64_ipi_test_timeout, relaxed);
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Latch a timeout baseline before polling, so the retry
		 * loop below compares against a valid start time.
		 */
		then = mach_absolute_time();
		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %lldms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Reset the baseline for the response wait below. */
		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %lldms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	assert(cdp);
	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}

/*
 * Routine: cpu_bootstrap
 * Function: Early per-CPU bootstrap hook; currently a no-op on arm64.
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function: Quiesce or sleep the current CPU, handing control to the
 *	platform sleep path; does not return.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;

	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(deep_sleep, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(deep_sleep);
#endif
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns a bool signifying a non-zero ISR_EL1,
 *	indicating a pending IRQ, FIQ or external abort.
 */
bool
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return isr_value != 0;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

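/*
 * Gate for the WFE-based idle paths below; off by default on T6000
 * board configs.
 */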
#ifdef ARM64_BOARD_CONFIG_T6000
int wfe_allowed = 0;
#else
int wfe_allowed = 1;
#endif /* ARM64_BOARD_CONFIG_T6000 */

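/* WFE-loop statistics bookkeeping; compiled away on RELEASE kernels. */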
#if DEVELOPMENT || DEBUG
#define WFE_STAT(x) \
	do { \
		(x); \
	} while(0)
#else
#define WFE_STAT(x) do {} while(0)
#endif /* DEVELOPMENT || DEBUG */

bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, cpu_data_t *cdp, bool unmask)
{
	bool ipending = false;
	uint64_t irqc = 0, nirqc = 0;

	/* The ARMv8 architecture permits a processor dwelling in WFE
	 * with F/IRQ masked to ignore a pending interrupt, i.e.
	 * not classify it as an 'event'. This is potentially
	 * problematic with AICv2's IRQ distribution model, as
	 * a transient interrupt masked interval can cause an SIQ
	 * query rejection, possibly routing the interrupt to
	 * another core/cluster in a powergated state.
	 * Hence, optionally unmask IRQs+FIQs across WFE.
	 */
	if (unmask) {
		/* Latch the SW IRQ+FIQ counter prior to unmasking
		 * interrupts.
		 */
		irqc = nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
		/* Unmask IRQ+FIQ. Mirrors the mask used by machine_idle(),
		 * with the ASYNCF omission. Consider that this could
		 * delay recognition of an async abort, including
		 * those triggered by ISRs.
		 */
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
	}

	while ((ipending = (cpu_interrupt_is_pending())) == false) {
		if (unmask) {
			/* If WFE was issued with IRQs unmasked, an
			 * interrupt may have been processed.
			 * Consult the SW IRQ counter to determine
			 * whether the 'idle loop' must be
			 * re-evaluated.
			 */
			nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
			if (nirqc != irqc) {
				break;
			}
		}

		if (__probable(wfe_allowed)) {
			/*
			 * If IRQs are unmasked, there's a small window
			 * where an 'extra' WFE may be issued after
			 * the consultation of the SW interrupt counter
			 * and new interrupt arrival. Hence this WFE
			 * relies on the [FI]RQ interrupt handler
			 * epilogue issuing a 'SEVL', to post an
			 * event which causes the next WFE on the same
			 * PE to retire immediately.
			 */
			__builtin_arm_wfe();
		}

		WFE_STAT(cdp->wfe_count++);
		if (wfe_deadline != ~0ULL) {
			WFE_STAT(cdp->wfe_deadline_checks++);
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
				WFE_STAT(cdp->wfe_terminations++);
				break;
			}
		}
	}

	if (unmask) {
		/* Re-mask IRQ+FIQ.
		 * Mirrors the mask used by machine_idle(), with the
		 * ASYNCF omission.
		 */
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
		/* Refetch the SW interrupt counter with IRQs masked.
		 * It is important that this routine accurately flags
		 * any observed interrupts via its return value;
		 * inaccuracy may lead to an erroneous WFI fallback.
		 */
		nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
	}

	return ipending || (nirqc != irqc);
}

/*
 * Routine: cpu_idle
 * Function: Low-power idle loop for the current CPU; issues WFE and/or
 *	WFI as configured and recommended, then re-enters the idle thread
 *	context on wakeup.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = cpu_data_ptr->cpu_cluster_id;

	if (idle_proximate_io_wfe_masked == 1) {
		uint64_t wfe_deadline = 0;
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		uint64_t wfe_ttd = 0;
		if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
			wfe_deadline = mach_absolute_time() + wfe_ttd;
		}

		if (wfe_deadline != 0) {
			/* Poll issuing event-bounded WFEs until an interrupt
			 * arrives or the WFE recommendation expires.
			 */
			KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, wfe_ttd, cid);
			ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr, false);
			KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, wfe_deadline);
			if (ipending == true) {
				/* Back to machine_idle() */
				Idle_load_context();
			}
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == 1) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr, false);
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate the wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function: Complete the transition out of idle: re-arm timer deadlines,
 *	restore coresight state when coming out of reset, and reload the
 *	idle context.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

#if KASAN_TBI
	kasan_unpoison_curstack(false);
#endif /* KASAN_TBI */

	Idle_load_context();
}

void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}

		do_cacheid();

		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

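	/*
	 * Each stack is allocated with ptoa(2) of slop so that
	 * KMA_GUARD_FIRST/KMA_GUARD_LAST place a guard page on either side;
	 * the usable stack top therefore sits one page above the allocation
	 * base plus the stack size.
	 */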
	kmem_alloc(kernel_map, &irq_stack,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kmem_alloc(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	kmem_free(kernel_map,
	    cpu_data_ptr->intstack_top - INTSTACK_SIZE - PAGE_SIZE,
	    INTSTACK_SIZE + 2 * PAGE_SIZE);
	kmem_free(kernel_map,
	    cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE - PAGE_SIZE,
	    EXCEPSTACK_SIZE + 2 * PAGE_SIZE);
}

void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_spill_marker = NULL;
	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif /* !XNU_MONITOR */
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}

kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib
 * segment; this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

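/*
 * Reset every cluster to the unlocked state. Placed in the hib segment
 * (MARK_AS_HIBERNATE_TEXT) so, per the note above, the hibernation resume
 * path can execute it early.
 */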
MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif

kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);

	if (processor_should_kprintf(processor, true)) {
		kprintf("cpu_start() cpu: %d\n", cpu);
	}

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif

		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)
		/* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
		 * other CPUs block until cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}

void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to tell the
		 * debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				/**
				 * Drain serial FIFOs now as the normal call further down won't
				 * be hit when the CPU halts here for hibernation. Here, it'll
				 * make sure the preceding HIBLOG is flushed as well.
				 */
				serial_go_to_sleep();
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

		serial_go_to_sleep();

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to tell the
		 * debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	}
}

void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe_masked = ((wfe_mode & 2) == 2);
			extern uint32_t idle_proximate_io_wfe_unmasked;
			idle_proximate_io_wfe_unmasked = ((wfe_mode & 4) == 4);
		}
#endif

		// bits 7..0 give the wfi type
		switch (wfi & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay (scaled by NSEC_PER_MSEC below)
			// 15..8  - flags
			// 7..0   - 2
			// Decode the flag and delay fields before collapsing
			// wfi to the bare mode value, which would otherwise
			// clobber them.
			wfi_flags = (wfi >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			wfi = 2;
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}
		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on a production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}
		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif
	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

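/*
 * Count of CPUs currently in the platform idle loop; maintained by
 * machine_track_platform_idle() below.
 */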
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stram node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif