1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * File: arm64/cpu.c
30 *
31 * CPU-specific routines
32 */
33
34 #include <pexpert/arm64/board_config.h>
35 #include <kern/kalloc.h>
36 #include <kern/machine.h>
37 #include <kern/cpu_number.h>
38 #include <kern/percpu.h>
39 #include <kern/thread.h>
40 #include <kern/timer_queue.h>
41 #include <arm/cpu_data.h>
42 #include <arm/cpuid.h>
43 #include <arm/caches_internal.h>
44 #include <arm/cpu_data_internal.h>
45 #include <arm/cpu_internal.h>
46 #include <arm/misc_protos.h>
47 #include <arm/machine_cpu.h>
48 #include <arm/rtclock.h>
49 #include <arm64/proc_reg.h>
50 #include <mach/processor_info.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_map.h>
54 #include <pexpert/arm/protos.h>
55 #include <pexpert/device_tree.h>
56 #include <sys/kdebug.h>
57 #include <arm/machine_routines.h>
58
59 #include <machine/atomic.h>
60
61 #include <san/kasan.h>
62
63 #if KPC
64 #include <kern/kpc.h>
65 #endif
66
67 #if MONOTONIC
68 #include <kern/monotonic.h>
69 #endif /* MONOTONIC */
70
71 #if KPERF
72 #include <kperf/kptimer.h>
73 #endif /* KPERF */
74
75 #if HIBERNATION
76 #include <IOKit/IOPlatformExpert.h>
77 #include <IOKit/IOHibernatePrivate.h>
78 #endif /* HIBERNATION */
79
80
81 #include <libkern/section_keywords.h>
82
83 extern boolean_t idle_enable;
84 extern uint64_t wake_abstime;
85
86 #if WITH_CLASSIC_S2R
87 void sleep_token_buffer_init(void);
88 #endif
89
90
91 extern uintptr_t resume_idle_cpu;
92 extern uintptr_t start_cpu;
93
94 #if __ARM_KERNEL_PROTECT__
95 extern void exc_vectors_table;
96 #endif /* __ARM_KERNEL_PROTECT__ */
97
98 extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep, unsigned int cpu, uint64_t entry_pa);
99 extern void arm64_force_wfi_clock_gate(void);
100 #if defined(APPLETYPHOON)
101 // <rdar://problem/15827409>
102 extern void typhoon_prepare_for_wfi(void);
103 extern void typhoon_return_from_wfi(void);
104 #endif
105
106 #if HAS_RETENTION_STATE
107 extern void arm64_retention_wfi(void);
108 #endif
109
110 vm_address_t start_cpu_paddr;
111
112 sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
113 .tcr_el1 = TCR_EL1_BOOT,
114 };
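/*
 * Saved system-register state consumed by the low-level reset/resume code;
 * at present only TCR_EL1 is tracked, seeded here with the boot-time value.
 */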
115
116 // wfi - wfi mode
117 // 0 : disabled
118 // 1 : normal
119 // 2 : overhead simulation (delay & flags)
120 TUNABLE(unsigned int, wfi, "wfi", 1);
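// For wfi=2, the upper bits of the same boot-arg encode the simulated
// overhead (flags and delay); they are decoded in cpu_machine_idle_init().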
121 #if DEVELOPMENT || DEBUG
122
123 // wfi_flags
124 // 1 << 0 : flush L1s
125 // 1 << 1 : flush TLBs
126 static int wfi_flags = 0;
127
128 // wfi_delay - delay ticks after wfi exit
129 static uint64_t wfi_delay = 0;
130
131 #endif /* DEVELOPMENT || DEBUG */
132
133 #define CPUPM_IDLE_WFE 0x5310300
134 #define CPUPM_IDLE_TIMER_WFE 0x5310304
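/* kdebug codes traced around the WFE polling loops in cpu_idle()
 * (DEVELOPMENT || DEBUG kernels only). */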
135
136 /* When recommended, issue WFE with [FI]IRQ unmasked in the idle
137 * loop. The default.
138 */
139 uint32_t idle_proximate_io_wfe_unmasked = 1;
140 #if DEVELOPMENT || DEBUG
141 uint32_t idle_proximate_timer_wfe = 1;
142 uint32_t idle_proximate_io_wfe_masked = 0;
143 #else
144 /* Issue WFE in lieu of WFI when awaiting a proximate timer. */
145 static uint32_t idle_proximate_timer_wfe = 1;
146 /* When recommended, issue WFE with [FI]IRQ masked in the idle loop.
147 * Non-default, retained for experimentation.
148 */
149 static uint32_t idle_proximate_io_wfe_masked = 0;
150 #endif
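/* On DEVELOPMENT || DEBUG kernels these WFE policies can be overridden at
 * boot via the "wfe_mode" boot-arg; see cpu_machine_idle_init(). */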
151
152 #if __ARM_GLOBAL_SLEEP_BIT__
153 volatile boolean_t arm64_stall_sleep = TRUE;
154 #endif
155
156 #if WITH_CLASSIC_S2R
157 /*
158 * These must be aligned to avoid issues with calling bcopy_phys on them before
159 * we are done with pmap initialization.
160 */
161 static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
162 static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
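/* Byte-reversed ASCII tags ("SUSPMOSX" / "RUNNMOSX") copied into the sleep
 * token buffer so iBoot can tell a warm (wake) boot from a cold boot. */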
163 #endif
164
165 #if WITH_CLASSIC_S2R
166 static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
167 #endif
168 static boolean_t coresight_debug_enabled = FALSE;
169
170 #if defined(CONFIG_XNUPOST)
171 void arm64_ipi_test_callback(void *);
172 void arm64_immediate_ipi_test_callback(void *);
173
174 void
175 arm64_ipi_test_callback(void *parm)
176 {
177 volatile uint64_t *ipi_test_data = parm;
178 cpu_data_t *cpu_data;
179
180 cpu_data = getCpuDatap();
181
182 *ipi_test_data = cpu_data->cpu_number;
183 }
184
185 void
186 arm64_immediate_ipi_test_callback(void *parm)
187 {
188 volatile uint64_t *ipi_test_data = parm;
189 cpu_data_t *cpu_data;
190
191 cpu_data = getCpuDatap();
192
193 *ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
194 }
195
196 uint64_t arm64_ipi_test_data[MAX_CPUS * 2];
197
198 MACHINE_TIMEOUT(arm64_ipi_test_timeout, "arm64-ipi-test", 100, MACHINE_TIMEOUT_UNIT_MSEC, NULL);
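/* Upper bound, in milliseconds, applied both to the immediate-IPI retry loop
 * and to the wait for each target CPU's response below. */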
199
200 void
201 arm64_ipi_test()
202 {
203 volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
204 uint64_t timeout_ms = os_atomic_load(&arm64_ipi_test_timeout, relaxed);
205 uint64_t then, now, delta;
206 int current_cpu_number = getCpuDatap()->cpu_number;
207
208 /*
209 * probably the only way to have this on most systems is with the
210 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
211 * IPI is not available
212 */
213 if (real_ncpus == 1) {
214 return;
215 }
216
217 const unsigned int max_cpu_id = ml_get_max_cpu_number();
218 for (unsigned int i = 0; i <= max_cpu_id; ++i) {
219 ipi_test_data = &arm64_ipi_test_data[i];
220 immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
221 *ipi_test_data = ~i;
222 kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
223 if (error != KERN_SUCCESS) {
224 panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
225 }
226
/* Latch the start time so the retry loop below measures elapsed time
 * from a defined origin. */
then = mach_absolute_time();

227 while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
228 (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
229 now = mach_absolute_time();
230 absolutetime_to_nanoseconds(now - then, &delta);
231 if ((delta / NSEC_PER_MSEC) > timeout_ms) {
232 panic("CPU %d was unable to immediate-IPI CPU %u within %lldms", current_cpu_number, i, timeout_ms);
233 }
234 }
235
236 if (error != KERN_SUCCESS) {
237 panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
238 }
239
240 then = mach_absolute_time();
241
242 while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
243 now = mach_absolute_time();
244 absolutetime_to_nanoseconds(now - then, &delta);
245 if ((delta / NSEC_PER_MSEC) > timeout_ms) {
246 panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %lldms, responses: %llx, %llx",
247 current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
248 }
249 }
250 }
251 }
252 #endif /* defined(CONFIG_XNUPOST) */
253
254 static void
255 configure_coresight_registers(cpu_data_t *cdp)
256 {
257 int i;
258
259 assert(cdp);
260 vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;
261
262 /*
263 * ARMv8 coresight registers are optional. If the device tree did not
264 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
265 * or coresight_regs (from the new "coresight-reg" property), assume that
266 * coresight registers are not supported.
267 */
268 if (cdp->cpu_regmap_paddr || coresight_regs) {
269 for (i = 0; i < CORESIGHT_REGIONS; ++i) {
270 if (i == CORESIGHT_CTI) {
271 continue;
272 }
273 /* Skip debug-only registers on production chips */
274 if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
275 continue;
276 }
277
278 if (!cdp->coresight_base[i]) {
279 if (coresight_regs) {
280 cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
281 } else {
282 uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
283 cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
284 }
285
286 /*
287 * At this point, failing to io map the
288 * registers is considered as an error.
289 */
290 if (!cdp->coresight_base[i]) {
291 panic("unable to ml_io_map coresight regions");
292 }
293 }
294 /* Unlock EDLAR, CTILAR, PMLAR */
295 if (i != CORESIGHT_UTT) {
296 *(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
297 }
298 }
299 }
300 }
301
302
303 /*
304 * Routine: cpu_bootstrap
305 * Function:
306 */
307 void
308 cpu_bootstrap(void)
309 {
310 }
311
312 /*
313 * Routine: cpu_sleep
314 * Function:
315 */
316 void
317 cpu_sleep(void)
318 {
319 cpu_data_t *cpu_data_ptr = getCpuDatap();
320
321 cpu_data_ptr->cpu_active_thread = current_thread();
322 cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
323 cpu_data_ptr->cpu_flags |= SleepState;
324 cpu_data_ptr->cpu_user_debug = NULL;
325 #if KPC
326 kpc_idle();
327 #endif /* KPC */
328 #if MONOTONIC
329 mt_cpu_down(cpu_data_ptr);
330 #endif /* MONOTONIC */
331 #if KPERF
332 kptimer_stop_curcpu();
333 #endif /* KPERF */
334
335 CleanPoC_Dcache();
336
337 #if USE_APPLEARMSMP
338 if (ml_is_quiescing()) {
339 PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
340 } else {
341 bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
342 cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
343 // hang CPU on spurious wakeup
344 cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
345 __builtin_arm_dsb(DSB_ISH);
346 CleanPoU_Dcache();
347 arm64_prepare_for_sleep(deep_sleep, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
348 }
349 #else
350 PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
351 #endif
352 /*NOTREACHED*/
353 }
354
355 /*
356 * Routine: cpu_interrupt_is_pending
357 * Function: Returns a bool signifying a non-zero ISR_EL1,
358 * indicating a pending IRQ, FIQ or external abort.
359 */
360
361 bool
362 cpu_interrupt_is_pending(void)
363 {
364 uint64_t isr_value;
365 isr_value = __builtin_arm_rsr64("ISR_EL1");
366 return isr_value != 0;
367 }
368
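/*
 * Returns true when SetIdlePop() reports that the next timer deadline is too
 * near to be worth a full WFI; cpu_idle() then polls in WFE for the expected
 * timer FIQ instead.
 */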
369 static bool
370 cpu_proximate_timer(void)
371 {
372 return !SetIdlePop();
373 }
374
375 #ifdef ARM64_BOARD_CONFIG_T6000
376 int wfe_allowed = 0;
377 #else
378 int wfe_allowed = 1;
379 #endif /* ARM64_BOARD_CONFIG_T6000 */
380
381 #if DEVELOPMENT || DEBUG
382 #define WFE_STAT(x) \
383 do { \
384 (x); \
385 } while(0)
386 #else
387 #define WFE_STAT(x) do {} while(0)
388 #endif /* DEVELOPMENT || DEBUG */
389
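/*
 * Poll in WFE until an interrupt is observed or wfe_deadline passes (or the
 * cluster's WFE recommendation is withdrawn). Returns true if an interrupt
 * was seen -- either a pending ISR_EL1 or, when running unmasked, a change in
 * the SW interrupt counter -- so the caller knows a WFI fallback is unneeded.
 */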
390 bool
391 wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, cpu_data_t *cdp, bool unmask)
392 {
393 bool ipending = false;
394 uint64_t irqc = 0, nirqc = 0;
395
396 /* The ARMv8 architecture permits a processor dwelling in WFE
397 * with F/IRQ masked to ignore a pending interrupt, i.e.
398 * not classify it as an 'event'. This is potentially
399 * problematic with AICv2's IRQ distribution model, as
400 * a transient interrupt masked interval can cause an SIQ
401 * query rejection, possibly routing the interrupt to
402 * another core/cluster in a powergated state.
403 * Hence, optionally unmask IRQs+FIQs across WFE.
404 */
405 if (unmask) {
406 /* Latch SW IRQ+FIQ counter prior to unmasking
407 * interrupts.
408 */
409 irqc = nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
410 /* Unmask IRQ+FIQ. Mirrors mask used by machine_idle()
411 * with ASYNCF omission. Consider that this could
412 * delay recognition of an async abort, including
413 * those triggered by ISRs
414 */
415 __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
416 }
417
418 while ((ipending = (cpu_interrupt_is_pending())) == false) {
419 if (unmask) {
420 /* If WFE was issued with IRQs unmasked, an
421 * interrupt may have been processed.
422 * Consult the SW IRQ counter to determine
423 * whether the 'idle loop' must be
424 * re-evaluated.
425 */
426 nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
427 if (nirqc != irqc) {
428 break;
429 }
430 }
431
432 if (__probable(wfe_allowed)) {
433 /*
434 * If IRQs are unmasked, there's a small window
435 * where an 'extra' WFE may be issued after
436 * the consultation of the SW interrupt counter
437 * and new interrupt arrival. Hence this WFE
438 * relies on the [FI]RQ interrupt handler
439 * epilogue issuing a 'SEVL', to post an
440 * event which causes the next WFE on the same
441 * PE to retire immediately.
442 */
443
444 __builtin_arm_wfe();
445 }
446
447 WFE_STAT(cdp->wfe_count++);
448 if (wfe_deadline != ~0ULL) {
449 WFE_STAT(cdp->wfe_deadline_checks++);
450 /* Check if the WFE recommendation has expired.
451 * We do not recompute the deadline here.
452 */
453 if ((ml_cluster_wfe_timeout(cid) == 0) ||
454 mach_absolute_time() >= wfe_deadline) {
455 WFE_STAT(cdp->wfe_terminations++);
456 break;
457 }
458 }
459 }
460
461 if (unmask) {
462 /* Re-mask IRQ+FIQ
463 * Mirrors mask used by machine_idle(), with ASYNCF
464 * omission
465 */
466 __builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
467 /* Refetch SW interrupt counter with IRQs masked
468 * It is important that this routine accurately flags
469 * any observed interrupts via its return value,
470 * inaccuracy may lead to an erroneous WFI fallback.
471 */
472 nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
473 }
474
475 return ipending || (nirqc != irqc);
476 }
477
478 /*
479 * Routine: cpu_idle
480 * Function:
481 */
482 void __attribute__((noreturn))
483 cpu_idle(void)
484 {
485 cpu_data_t *cpu_data_ptr = getCpuDatap();
486 uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
487 bool idle_disallowed = false;
488
489 if (__improbable((!idle_enable))) {
490 idle_disallowed = true;
491 } else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
492 idle_disallowed = true;
493 }
494
495 if (__improbable(idle_disallowed)) {
496 Idle_load_context();
497 }
498
499 bool ipending = false;
500 uint32_t cid = cpu_data_ptr->cpu_cluster_id;
501
502 if (idle_proximate_io_wfe_masked == 1) {
503 uint64_t wfe_deadline = 0;
504 /* Check for an active perf. controller generated
505 * WFE recommendation for this cluster.
506 */
507 uint64_t wfe_ttd = 0;
508 if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
509 wfe_deadline = mach_absolute_time() + wfe_ttd;
510 }
511
512 if (wfe_deadline != 0) {
513 /* Poll issuing event-bounded WFEs until an interrupt
514 * arrives or the WFE recommendation expires
515 */
516 #if DEVELOPMENT || DEBUG
517 KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, wfe_ttd, cid);
518 #endif
519 ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr, false);
520 #if DEVELOPMENT || DEBUG
521 KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, wfe_deadline, 0);
522 #endif
523 if (ipending == true) {
524 /* Back to machine_idle() */
525 Idle_load_context();
526 }
527 }
528 }
529
530 if (__improbable(cpu_proximate_timer())) {
531 if (idle_proximate_timer_wfe == 1) {
532 /* Poll issuing WFEs until the expected
533 * timer FIQ arrives.
534 */
535 #if DEVELOPMENT || DEBUG
536 KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, ~0ULL, 0);
537 #endif
538 ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr, false);
539 #if DEVELOPMENT || DEBUG
540 KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, ~0ULL, 0);
541 #endif
542 assert(ipending == true);
543 }
544 Idle_load_context();
545 }
546
547 lastPop = cpu_data_ptr->rtcPop;
548
549 cpu_data_ptr->cpu_active_thread = current_thread();
550
551 if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
552 cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
553 }
554
555 if (cpu_data_ptr->idle_timer_notify != NULL) {
556 if (new_idle_timeout_ticks == 0x0ULL) {
557 /* turn off the idle timer */
558 cpu_data_ptr->idle_timer_deadline = 0x0ULL;
559 } else {
560 /* set the new idle timeout */
561 clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
562 }
563 timer_resync_deadlines();
564 if (cpu_data_ptr->rtcPop != lastPop) {
565 SetIdlePop();
566 }
567 }
568
569 #if KPC
570 kpc_idle();
571 #endif
572 #if MONOTONIC
573 mt_cpu_idle(cpu_data_ptr);
574 #endif /* MONOTONIC */
575
576 if (wfi) {
577 #if !defined(APPLE_ARM64_ARCH_FAMILY)
578 platform_cache_idle_enter();
579 #endif
580
581 #if DEVELOPMENT || DEBUG
582 // When simulating wfi overhead,
583 // force wfi to clock gating only
584 if (wfi == 2) {
585 arm64_force_wfi_clock_gate();
586 }
587 #endif /* DEVELOPMENT || DEBUG */
588
589 #if defined(APPLETYPHOON)
590 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
591 typhoon_prepare_for_wfi();
592 #endif
593 __builtin_arm_dsb(DSB_SY);
594 #if HAS_RETENTION_STATE
595 arm64_retention_wfi();
596 #else
597 __builtin_arm_wfi();
598 #endif
599
600 #if defined(APPLETYPHOON)
601 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
602 typhoon_return_from_wfi();
603 #endif
604
605 #if DEVELOPMENT || DEBUG
606 // Handle wfi overhead simulation
607 if (wfi == 2) {
608 uint64_t deadline;
609
610 // Calculate wfi delay deadline
611 clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);
612
613 // Flush L1 caches
614 if ((wfi_flags & 1) != 0) {
615 InvalidatePoU_Icache();
616 FlushPoC_Dcache();
617 }
618
619 // Flush TLBs
620 if ((wfi_flags & 2) != 0) {
621 flush_core_tlb();
622 }
623
624 // Wait for the balance of the wfi delay
625 clock_delay_until(deadline);
626 }
627 #endif /* DEVELOPMENT || DEBUG */
628 #if !defined(APPLE_ARM64_ARCH_FAMILY)
629 platform_cache_idle_exit();
630 #endif
631 }
632
633 ClearIdlePop(TRUE);
634
635 cpu_idle_exit(FALSE);
636 }
637
638 /*
639 * Routine: cpu_idle_exit
640 * Function:
641 */
642 void
643 cpu_idle_exit(boolean_t from_reset)
644 {
645 uint64_t new_idle_timeout_ticks = 0x0ULL;
646 cpu_data_t *cpu_data_ptr = getCpuDatap();
647
648 assert(exception_stack_pointer() != 0);
649
650 /* Back from WFI, unlock OSLAR and EDLAR. */
651 if (from_reset) {
652 configure_coresight_registers(cpu_data_ptr);
653 }
654
655 #if KPC
656 kpc_idle_exit();
657 #endif
658
659 #if MONOTONIC
660 mt_cpu_run(cpu_data_ptr);
661 #endif /* MONOTONIC */
662
663 if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
664 cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
665 }
666
667 if (cpu_data_ptr->idle_timer_notify != NULL) {
668 if (new_idle_timeout_ticks == 0x0ULL) {
669 /* turn off the idle timer */
670 cpu_data_ptr->idle_timer_deadline = 0x0ULL;
671 } else {
672 /* set the new idle timeout */
673 clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
674 }
675 timer_resync_deadlines();
676 }
677
678 Idle_load_context();
679 }
680
681 void
682 cpu_init(void)
683 {
684 cpu_data_t *cdp = getCpuDatap();
685 arm_cpu_info_t *cpu_info_p;
686
687 assert(exception_stack_pointer() != 0);
688
689 if (cdp->cpu_type != CPU_TYPE_ARM64) {
690 cdp->cpu_type = CPU_TYPE_ARM64;
691
692 timer_call_queue_init(&cdp->rtclock_timer.queue);
693 cdp->rtclock_timer.deadline = EndOfAllTime;
694
695 if (cdp == &BootCpuData) {
696 do_cpuid();
697 do_mvfpid();
698 } else {
699 /*
700 * We initialize non-boot CPUs here; the boot CPU is
701 * dealt with as part of pmap_bootstrap.
702 */
703 pmap_cpu_data_init();
704 }
705
706 do_cacheid();
707
708 /* ARM_SMP: Assuming identical cpu */
709 do_debugid();
710
711 cpu_info_p = cpuid_info();
712
713 /* switch based on CPU's reported architecture */
714 switch (cpu_info_p->arm_info.arm_arch) {
715 case CPU_ARCH_ARMv8:
716 cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
717 break;
718 case CPU_ARCH_ARMv8E:
719 cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
720 break;
721 default:
722 //cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
723 /* this panic doesn't work this early in startup */
724 panic("Unknown CPU subtype...");
725 break;
726 }
727
728 cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
729 }
730 cdp->cpu_stat.irq_ex_cnt_wake = 0;
731 cdp->cpu_stat.ipi_cnt_wake = 0;
732 #if MONOTONIC
733 cdp->cpu_stat.pmi_cnt_wake = 0;
734 #endif /* MONOTONIC */
735 cdp->cpu_running = TRUE;
736 cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
737 cdp->cpu_sleep_token = 0x0UL;
738 #if KPC
739 kpc_idle_exit();
740 #endif /* KPC */
741 #if MONOTONIC
742 mt_cpu_up(cdp);
743 #endif /* MONOTONIC */
744 }
745
746 void
747 cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
748 {
749 vm_offset_t irq_stack = 0;
750 vm_offset_t exc_stack = 0;
751
752 kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
753 INTSTACK_SIZE + (2 * PAGE_SIZE),
754 PAGE_MASK,
755 KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
756 VM_KERN_MEMORY_STACK);
757 if (kr != KERN_SUCCESS) {
758 panic("Unable to allocate cpu interrupt stack");
759 }
760
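/* Layout is [guard page][INTSTACK_SIZE stack][guard page], so the usable
 * stack top sits one page plus INTSTACK_SIZE above the allocation base. */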
761 cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
762 cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
763
764 kr = kernel_memory_allocate(kernel_map, &exc_stack,
765 EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
766 PAGE_MASK,
767 KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
768 VM_KERN_MEMORY_STACK);
769 if (kr != KERN_SUCCESS) {
770 panic("Unable to allocate cpu exception stack");
771 }
772
773 cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
774 cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
775 }
776
777 void
778 cpu_data_free(cpu_data_t *cpu_data_ptr)
779 {
780 if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
781 return;
782 }
783
784 int cpu_number = cpu_data_ptr->cpu_number;
785
786 if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
787 CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
788 CpuDataEntries[cpu_number].cpu_data_paddr = 0;
789 __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
790 }
791 kmem_free(kernel_map,
792 cpu_data_ptr->intstack_top - INTSTACK_SIZE - PAGE_SIZE,
793 INTSTACK_SIZE + 2 * PAGE_SIZE);
794 kmem_free(kernel_map,
795 cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE - PAGE_SIZE,
796 EXCEPSTACK_SIZE + 2 * PAGE_SIZE);
797 }
798
799 void
800 cpu_data_init(cpu_data_t *cpu_data_ptr)
801 {
802 uint32_t i;
803
804 cpu_data_ptr->cpu_flags = 0;
805 cpu_data_ptr->cpu_int_state = 0;
806 cpu_data_ptr->cpu_pending_ast = AST_NONE;
807 cpu_data_ptr->cpu_cache_dispatch = NULL;
808 cpu_data_ptr->rtcPop = EndOfAllTime;
809 cpu_data_ptr->rtclock_datap = &RTClockData;
810 cpu_data_ptr->cpu_user_debug = NULL;
811
812
813 cpu_data_ptr->cpu_base_timebase = 0;
814 cpu_data_ptr->cpu_idle_notify = NULL;
815 cpu_data_ptr->cpu_idle_latency = 0x0ULL;
816 cpu_data_ptr->cpu_idle_pop = 0x0ULL;
817 cpu_data_ptr->cpu_reset_type = 0x0UL;
818 cpu_data_ptr->cpu_reset_handler = 0x0UL;
819 cpu_data_ptr->cpu_reset_assist = 0x0UL;
820 cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
821 cpu_data_ptr->cpu_phys_id = 0x0UL;
822 cpu_data_ptr->cpu_l2_access_penalty = 0;
823 cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
824 cpu_data_ptr->cpu_cluster_id = 0;
825 cpu_data_ptr->cpu_l2_id = 0;
826 cpu_data_ptr->cpu_l2_size = 0;
827 cpu_data_ptr->cpu_l3_id = 0;
828 cpu_data_ptr->cpu_l3_size = 0;
829
830 cpu_data_ptr->cpu_signal = SIGPdisabled;
831
832 cpu_data_ptr->cpu_get_fiq_handler = NULL;
833 cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
834 cpu_data_ptr->cpu_tbd_hardware_val = NULL;
835 cpu_data_ptr->cpu_get_decrementer_func = NULL;
836 cpu_data_ptr->cpu_set_decrementer_func = NULL;
837 cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
838 cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
839 cpu_data_ptr->cpu_xcall_p0 = NULL;
840 cpu_data_ptr->cpu_xcall_p1 = NULL;
841 cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
842 cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
843
844 for (i = 0; i < CORESIGHT_REGIONS; ++i) {
845 cpu_data_ptr->coresight_base[i] = 0;
846 }
847
848 #if !XNU_MONITOR
849 pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;
850
851 pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
852 pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
853 pmap_cpu_data_ptr->pv_free.list = NULL;
854 pmap_cpu_data_ptr->pv_free.count = 0;
855 pmap_cpu_data_ptr->pv_free_spill_marker = NULL;
856
857 bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
858 #endif
859 cpu_data_ptr->halt_status = CPU_NOT_HALTED;
860 #if __ARM_KERNEL_PROTECT__
861 cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
862 #endif /* __ARM_KERNEL_PROTECT__ */
863
864 #if defined(HAS_APPLE_PAC)
865 cpu_data_ptr->rop_key = 0;
866 cpu_data_ptr->jop_key = ml_default_jop_pid();
867 #endif
868 }
869
870 kern_return_t
871 cpu_data_register(cpu_data_t *cpu_data_ptr)
872 {
873 int cpu = cpu_data_ptr->cpu_number;
874
875 #if KASAN
876 for (int i = 0; i < CPUWINDOWS_MAX; i++) {
877 kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
878 }
879 #endif
880
881 __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
882 CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
883 CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
884 return KERN_SUCCESS;
885 }
886
887 #if defined(KERNEL_INTEGRITY_CTRR)
888 /* Hibernation needs to reset this state, so data and text are in the hib segment;
889 * this allows them to be accessed and executed early.
890 */
891 LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
892 LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
893 enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;
894
895 MARK_AS_HIBERNATE_TEXT
896 void
897 init_ctrr_cluster_states(void)
898 {
899 for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
900 ctrr_cluster_locked[i] = CTRR_UNLOCKED;
901 }
902 }
903 #endif
904
905 kern_return_t
906 cpu_start(int cpu)
907 {
908 cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
909
910 kprintf("cpu_start() cpu: %d\n", cpu);
911
912 if (cpu == cpu_number()) {
913 cpu_machine_init();
914 configure_coresight_registers(cpu_data_ptr);
915 } else {
916 thread_t first_thread;
917 processor_t processor;
918
919 cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
920
921 #if !XNU_MONITOR
922 cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
923 #endif
924
925 processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
926 if (processor->startup_thread != THREAD_NULL) {
927 first_thread = processor->startup_thread;
928 } else {
929 first_thread = processor->idle_thread;
930 }
931 cpu_data_ptr->cpu_active_thread = first_thread;
932 first_thread->machine.CpuDatap = cpu_data_ptr;
933 first_thread->machine.pcpu_data_base =
934 (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);
935
936 configure_coresight_registers(cpu_data_ptr);
937
938 flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
939 flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
940 #if defined(KERNEL_INTEGRITY_CTRR)
941
942 /* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
943 * other CPUs block until cluster is locked. */
944 lck_spin_lock(&ctrr_cpu_start_lck);
945 switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
946 case CTRR_UNLOCKED:
947 ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
948 lck_spin_unlock(&ctrr_cpu_start_lck);
949 break;
950 case CTRR_LOCKING:
951 assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
952 lck_spin_unlock(&ctrr_cpu_start_lck);
953 thread_block(THREAD_CONTINUE_NULL);
954 assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
955 break;
956 default: // CTRR_LOCKED
957 lck_spin_unlock(&ctrr_cpu_start_lck);
958 break;
959 }
960 #endif
961 (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
962 }
963
964 return KERN_SUCCESS;
965 }
966
967
968 void
969 cpu_timebase_init(boolean_t from_boot)
970 {
971 cpu_data_t *cdp = getCpuDatap();
972
973 if (cdp->cpu_get_fiq_handler == NULL) {
974 cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
975 cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
976 cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
977 cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
978 cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
979 }
980
981 if (!from_boot && (cdp == &BootCpuData)) {
982 /*
983 * When we wake from sleep, we have no guarantee about the state
984 * of the hardware timebase. It may have kept ticking across sleep, or
985 * it may have reset.
986 *
987 * To deal with this, we calculate an offset to the clock that will
988 * produce a timebase value wake_abstime at the point the boot
989 * CPU calls cpu_timebase_init on wake.
990 *
991 * This ensures that mach_absolute_time() stops ticking across sleep.
992 */
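/*
 * For example, if mach_absolute_time() read A when the system slept
 * (wake_abstime == A) and the hardware clock reads H on wake, then
 * rtclock_base_abstime = A - H, so base + hwclock == A immediately
 * after wake.
 */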
993 rtclock_base_abstime = wake_abstime - ml_get_hwclock();
994 } else if (from_boot) {
995 /* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
996 ml_set_reset_time(ml_get_hwclock());
997 }
998
999 cdp->cpu_decrementer = 0x7FFFFFFFUL;
1000 cdp->cpu_timebase = 0x0UL;
1001 cdp->cpu_base_timebase = rtclock_base_abstime;
1002 }
1003
1004 int
1005 cpu_cluster_id(void)
1006 {
1007 return getCpuDatap()->cpu_cluster_id;
1008 }
1009
1010 __attribute__((noreturn))
1011 void
1012 ml_arm_sleep(void)
1013 {
1014 cpu_data_t *cpu_data_ptr = getCpuDatap();
1015
1016 if (cpu_data_ptr == &BootCpuData) {
1017 cpu_data_t *target_cdp;
1018 int cpu;
1019 int max_cpu;
1020
1021 max_cpu = ml_get_max_cpu_number();
1022 for (cpu = 0; cpu <= max_cpu; cpu++) {
1023 target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1024
1025 if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
1026 continue;
1027 }
1028
1029 while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
1030 ;
1031 }
1032 }
1033
1034 /*
1035 * Now that the other cores have entered the sleep path, set
1036 * the abstime value we'll use when we resume.
1037 */
1038 wake_abstime = ml_get_timebase();
1039 ml_set_reset_time(UINT64_MAX);
1040 } else {
1041 CleanPoU_Dcache();
1042 }
1043
1044 cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
1045
1046 if (cpu_data_ptr == &BootCpuData) {
1047 #if WITH_CLASSIC_S2R
1048 // Classic suspend to RAM writes the suspend signature into the
1049 // sleep token buffer so that iBoot knows that it's on the warm
1050 // boot (wake) path (as opposed to the cold boot path). Newer SoCs
1051 // do not go through SecureROM/iBoot on the warm boot path. The
1052 // reconfig engine script brings the CPU out of reset at the kernel's
1053 // reset vector which points to the warm boot initialization code.
1054 if (sleepTokenBuffer != (vm_offset_t) NULL) {
1055 platform_cache_shutdown();
1056 bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
1057 } else {
1058 panic("No sleep token buffer");
1059 }
1060 #endif
1061
1062 #if __ARM_GLOBAL_SLEEP_BIT__
1063 /* Allow other CPUs to go to sleep. */
1064 arm64_stall_sleep = FALSE;
1065 __builtin_arm_dmb(DMB_ISH);
1066 #endif
1067
1068 /* Architectural debug state: <rdar://problem/12390433>:
1069 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
1070 * to tell the debugger not to prevent power gating.
1071 */
1072 if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
1073 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
1074 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
1075 }
1076
1077 #if HIBERNATION
1078 uint32_t mode = hibernate_write_image();
1079 if (mode == kIOHibernatePostWriteHalt) {
1080 HIBLOG("powering off after writing hibernation image\n");
1081 int halt_result = -1;
1082 if (PE_halt_restart) {
1083 /**
1084 * Drain serial FIFOs now as the normal call further down won't
1085 * be hit when the CPU halts here for hibernation. Here, it'll
1086 * make sure the preceding HIBLOG is flushed as well.
1087 */
1088 serial_go_to_sleep();
1089 halt_result = (*PE_halt_restart)(kPEHaltCPU);
1090 }
1091 panic("can't shutdown: PE_halt_restart returned %d", halt_result);
1092 }
1093 #endif /* HIBERNATION */
1094
1095 serial_go_to_sleep();
1096
1097 #if MONOTONIC
1098 mt_sleep();
1099 #endif /* MONOTONIC */
1100 /* ARM64-specific preparation */
1101 arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
1102 } else {
1103 #if __ARM_GLOBAL_SLEEP_BIT__
1104 /*
1105 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
1106 * global register to manage entering deep sleep, as opposed to a per-CPU
1107 * register. We cannot update this register until all CPUs are ready to enter
1108 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
1109 * (by idling), it will hang (due to the side effects of enabling deep sleep),
1110 * which can hang the sleep process or cause memory corruption on wake.
1111 *
1112 * To avoid these issues, we'll stall on this global value, which CPU0 will
1113 * manage.
1114 */
1115 while (arm64_stall_sleep) {
1116 __builtin_arm_wfe();
1117 }
1118 #endif
1119 CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
1120
1121 /* Architectural debug state: <rdar://problem/12390433>:
1122 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
1123 * to tell the debugger not to prevent power gating.
1124 */
1125 if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
1126 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
1127 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
1128 }
1129
1130 /* ARM64-specific preparation */
1131 arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
1132 }
1133 }
1134
1135 void
1136 cpu_machine_idle_init(boolean_t from_boot)
1137 {
1138 static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
1139 cpu_data_t *cpu_data_ptr = getCpuDatap();
1140
1141 if (from_boot) {
1142 uint32_t production = 1;
1143 DTEntry entry;
1144
1145 unsigned long jtag = 0;
1146
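/* A non-zero "jtag" boot-arg disables the idle loop, e.g. so cores stay out
 * of low-power states while an external JTAG debugger is attached. */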
1147 if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
1148 if (jtag != 0) {
1149 idle_enable = FALSE;
1150 } else {
1151 idle_enable = TRUE;
1152 }
1153 } else {
1154 idle_enable = TRUE;
1155 }
1156
1157 #if DEVELOPMENT || DEBUG
1158 uint32_t wfe_mode = 0;
1159 if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
1160 idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
1161 idle_proximate_io_wfe_masked = ((wfe_mode & 2) == 2);
1162 extern uint32_t idle_proximate_io_wfe_unmasked;
1163 idle_proximate_io_wfe_unmasked = ((wfe_mode & 4) == 4);
1164 }
1165 #endif
1166
1167 // bits 7..0 give the wfi type
1168 switch (wfi & 0xff) {
1169 case 0:
1170 // disable wfi
1171 wfi = 0;
1172 break;
1173
1174 #if DEVELOPMENT || DEBUG
1175 case 2:
1176 // wfi overhead simulation
1177 // 31..16 - wfi delay in us
1178 // 15..8 - flags
1179 // 7..0 - 2
1180 wfi_flags = (wfi >> 8) & 0xFF;
1181 nanoseconds_to_absolutetime(((wfi >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
1182 wfi = 2;
1183 break;
1184 #endif /* DEVELOPMENT || DEBUG */
1185
1186 case 1:
1187 default:
1188 // do nothing
1189 break;
1190 }
1191
1192 ResetHandlerData.assist_reset_handler = 0;
1193 ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);
1194
1195 #ifdef MONITOR
1196 monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
1197 #elif !defined(NO_MONITOR)
1198 #error MONITOR undefined, WFI power gating may not operate correctly
1199 #endif /* MONITOR */
1200
1201 // Determine if we are on production or debug chip
1202 if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
1203 unsigned int size;
1204 void const *prop;
1205
1206 if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
1207 if (size == 4) {
1208 bcopy(prop, &production, size);
1209 }
1210 }
1211 }
1212 if (!production) {
1213 #if defined(APPLE_ARM64_ARCH_FAMILY)
1214 // Enable coresight debug registers on debug-fused chips
1215 coresight_debug_enabled = TRUE;
1216 #endif
1217 }
1218
1219 start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
1220 resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
1221 }
1222
1223 #if WITH_CLASSIC_S2R
1224 if (cpu_data_ptr == &BootCpuData) {
1225 static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
1226 if (sleepTokenBuffer != (vm_offset_t) NULL) {
1227 SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
1228 } else {
1229 panic("No sleep token buffer");
1230 }
1231
1232 bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
1233 SleepToken_low_paddr, sizeof(SleepToken));
1234 flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
1235 }
1236
1237 #endif
1238
1239 cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
1240 clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
1241 }
1242
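/* Number of CPUs currently sitting in the platform idle loop; maintained by
 * machine_track_platform_idle() below. */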
1243 _Atomic uint32_t cpu_idle_count = 0;
1244
1245 void
1246 machine_track_platform_idle(boolean_t entry)
1247 {
1248 if (entry) {
1249 os_atomic_inc(&cpu_idle_count, relaxed);
1250 } else {
1251 os_atomic_dec(&cpu_idle_count, relaxed);
1252 }
1253 }
1254
1255 #if WITH_CLASSIC_S2R
1256 void
1257 sleep_token_buffer_init(void)
1258 {
1259 cpu_data_t *cpu_data_ptr = getCpuDatap();
1260 DTEntry entry;
1261 size_t size;
1262 void const * const *prop;
1263
1264 if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
1265 /* Find the stpage node in the device tree */
1266 if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
1267 return;
1268 }
1269
1270 if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
1271 return;
1272 }
1273
1274 /* Map the page into the kernel space */
1275 sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
1276 }
1277 }
1278 #endif
1279