/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */


#include <libkern/section_keywords.h>

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif

extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;
vm_address_t start_cpu_paddr;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if APPLEVIRTUALPLATFORM
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep, unsigned int cpu, uint64_t entry_pa);
#else
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
#endif
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};

// wfi - wfi mode
// 0 : disabled
// 1 : normal
// 2 : overhead simulation (delay & flags)
TUNABLE(unsigned int, wfi, "wfi", 1);
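// The "wfi" boot-arg selects the mode above; on DEVELOPMENT/DEBUG kernels the
// upper bytes of the value also encode simulation flags and a delay, decoded
// in cpu_machine_idle_init() below. For example, booting with wfi=0 disables
// WFI in the idle loop entirely.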
#if DEVELOPMENT || DEBUG

// wfi_flags
// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

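// kdebug trace codes emitted around the WFE polling loops in cpu_idle().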
#define CPUPM_IDLE_WFE 0x5310300
#define CPUPM_IDLE_TIMER_WFE 0x5310304

#define DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC (60ULL)
TUNABLE(uint32_t, expecting_ipi_wfe_timeout_usec,
    "expecting_ipi_wfe_timeout_usec", DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC);
uint64_t expecting_ipi_wfe_timeout_mt = 0x0ULL; /* initialized to a non-zero value in sched_init */

/* When recommended, issue WFE with [FI]IRQ unmasked in the idle
 * loop. The default.
 */
uint32_t idle_proximate_io_wfe_unmasked = 1;
#if DEVELOPMENT || DEBUG
uint32_t idle_proximate_timer_wfe = 1;
uint32_t idle_proximate_io_wfe_masked = 0;
#else
/* Issue WFE in lieu of WFI when awaiting a proximate timer. */
static uint32_t idle_proximate_timer_wfe = 1;
/* When recommended, issue WFE with [FI]IRQ masked in the idle loop.
 * Non-default, retained for experimentation.
 */
static uint32_t idle_proximate_io_wfe_masked = 0;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

MACHINE_TIMEOUT(arm64_ipi_test_timeout, "arm64-ipi-test", 100, MACHINE_TIMEOUT_UNIT_MSEC, NULL);

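/*
 * XNUPOST self-test: send a normal and an immediate cross-call (IPI) to every
 * CPU, and panic if either is not delivered and acknowledged within
 * arm64_ipi_test_timeout milliseconds.
 */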
void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint64_t timeout_ms = os_atomic_load(&arm64_ipi_test_timeout, relaxed);
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();
		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %lldms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %lldms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

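/*
 * Map this CPU's CoreSight debug register regions (if the device tree
 * describes them) and unlock their lock-access registers so the debug
 * registers can be programmed later.
 */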
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	assert(cdp);
	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;

	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(deep_sleep, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(deep_sleep);
#endif
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns a bool signifying a non-zero ISR_EL1,
 * indicating a pending IRQ, FIQ or external abort.
 */

bool
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return isr_value != 0;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

#ifdef ARM64_BOARD_CONFIG_T6000
int wfe_allowed = 0;
#else
int wfe_allowed = 1;
#endif /* ARM64_BOARD_CONFIG_T6000 */

#if DEVELOPMENT || DEBUG
#define WFE_STAT(x) \
	do { \
		(x); \
	} while(0)
#else
#define WFE_STAT(x) do {} while(0)
#endif /* DEVELOPMENT || DEBUG */

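/*
 * Spin in WFE until an interrupt becomes pending, the supplied wfe_deadline
 * passes, or (when check_cluster_recommendation is set) the cluster's WFE
 * recommendation is withdrawn. If 'unmask' is true, IRQ/FIQ are unmasked
 * across the WFEs and re-masked before returning. Returns true if an
 * interrupt is pending or was observed (via the per-CPU interrupt counter)
 * while waiting.
 */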
bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, cpu_data_t *cdp, bool unmask, bool check_cluster_recommendation)
{
	bool ipending = false;
	uint64_t irqc = 0, nirqc = 0;

	/* The ARMv8 architecture permits a processor dwelling in WFE
	 * with F/IRQ masked to ignore a pending interrupt, i.e.
	 * not classify it as an 'event'. This is potentially
	 * problematic with AICv2's IRQ distribution model, as
	 * a transient interrupt masked interval can cause an SIQ
	 * query rejection, possibly routing the interrupt to
	 * another core/cluster in a powergated state.
	 * Hence, optionally unmask IRQs+FIQs across WFE.
	 */
	if (unmask) {
		/* Latch SW IRQ+FIQ counter prior to unmasking
		 * interrupts.
		 */
		irqc = nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
		/* Unmask IRQ+FIQ. Mirrors mask used by machine_idle()
		 * with ASYNCF omission. Consider that this could
		 * delay recognition of an async abort, including
		 * those triggered by ISRs
		 */
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
	}

	while ((ipending = (cpu_interrupt_is_pending())) == false) {
		if (unmask) {
			/* If WFE was issued with IRQs unmasked, an
			 * interrupt may have been processed.
			 * Consult the SW IRQ counter to determine
			 * whether the 'idle loop' must be
			 * re-evaluated.
			 */
			nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
			if (nirqc != irqc) {
				break;
			}
		}

		if (__probable(wfe_allowed)) {
			/*
			 * If IRQs are unmasked, there's a small window
			 * where an 'extra' WFE may be issued after
			 * the consultation of the SW interrupt counter
			 * and new interrupt arrival. Hence this WFE
			 * relies on the [FI]RQ interrupt handler
			 * epilogue issuing a 'SEVL', to post an
			 * event which causes the next WFE on the same
			 * PE to retire immediately.
			 */

			__builtin_arm_wfe();
		}

		WFE_STAT(cdp->wfe_count++);
		if (wfe_deadline != ~0ULL) {
			WFE_STAT(cdp->wfe_deadline_checks++);
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((check_cluster_recommendation && ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
				WFE_STAT(cdp->wfe_terminations++);
				break;
			}
		}
	}

	if (unmask) {
		/* Re-mask IRQ+FIQ
		 * Mirrors mask used by machine_idle(), with ASYNCF
		 * omission
		 */
		__builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
		/* Refetch SW interrupt counter with IRQs masked
		 * It is important that this routine accurately flags
		 * any observed interrupts via its return value,
		 * inaccuracy may lead to an erroneous WFI fallback.
		 */
		nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
	}

	return ipending || (nirqc != irqc);
}

/*
 * Routine: cpu_idle
 * Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	processor_t processor = current_processor();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;
	/* Read and reset the next_idle_short flag */
	bool next_idle_short = processor->next_idle_short;
	processor->next_idle_short = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = cpu_data_ptr->cpu_cluster_id;
	bool check_cluster_recommendation = true;
	uint64_t wfe_timeout = 0;

	if (idle_proximate_io_wfe_masked == 1) {
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		wfe_timeout = ml_cluster_wfe_timeout(cid);
	}

	if (next_idle_short && expecting_ipi_wfe_timeout_mt > wfe_timeout) {
		/* In this case we should WFE because a response IPI
		 * is expected soon.
		 */
		wfe_timeout = expecting_ipi_wfe_timeout_mt;
		check_cluster_recommendation = false;
	}

	if (wfe_timeout != 0) {
		uint64_t wfe_deadline = mach_absolute_time() + wfe_timeout;
		/* Poll issuing event-bounded WFEs until an interrupt
		 * arrives or the WFE recommendation expires
		 */
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, wfe_timeout, !check_cluster_recommendation);
		ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr, false, check_cluster_recommendation);
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, wfe_deadline);
		if (ipending == true) {
			/* Back to machine_idle() */
			Idle_load_context();
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == 1) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr, false, false);
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function:
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

#if KASAN_TBI
	kasan_unpoison_curstack(false);
#endif /* KASAN_TBI */

	Idle_load_context();
}

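/*
 * Per-CPU (re)initialization. Wake-relative statistics and the sleep token
 * are reset on every call; CPU type/subtype detection and timer queue setup
 * happen only the first time through for a given CPU.
 */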
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}

		do_cacheid();

		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

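/*
 * Allocate this CPU's interrupt and exception stacks. Each allocation is the
 * stack size plus two pages: KMA_GUARD_FIRST/KMA_GUARD_LAST reserve a guard
 * page on either side, so the usable stack top is base + PAGE_SIZE + stack
 * size.
 */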
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kmem_alloc(kernel_map, &irq_stack,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = (void *)cpu_data_ptr->intstack_top;

	kmem_alloc(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
}

void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
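	/* Free both stacks, including their leading and trailing guard pages
	 * (mirroring the sizes allocated in cpu_stack_alloc()). */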
	kmem_free(kernel_map,
	    cpu_data_ptr->intstack_top - INTSTACK_SIZE - PAGE_SIZE,
	    INTSTACK_SIZE + 2 * PAGE_SIZE);
	kmem_free(kernel_map,
	    cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE - PAGE_SIZE,
	    EXCEPSTACK_SIZE + 2 * PAGE_SIZE);
}

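/*
 * Reset a cpu_data_t to its default state before the corresponding CPU is
 * started: signalling starts out disabled (SIGPdisabled) and the sleep token
 * is set to ARM_CPU_ON_SLEEP_PATH.
 */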
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_spill_marker = NULL;
	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif /* !XNU_MONITOR */
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}

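/*
 * Publish this CPU's cpu_data (virtual and physical addresses) in
 * CpuDataEntries so other CPUs and the reset/start path can find it. The DMB
 * ensures earlier initialization of the structure is visible first.
 */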
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif

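/*
 * Bring a CPU online. For the calling CPU this only re-runs machine init and
 * CoreSight setup; for any other CPU it installs the reset handler and first
 * thread, handles per-cluster CTRR lockdown ordering where applicable, and
 * then asks the platform expert to start the core.
 */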
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);

	if (processor_should_kprintf(processor, true)) {
		kprintf("cpu_start() cpu: %d\n", cpu);
	}

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif

		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)

		/* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
		 * other CPUs block until cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


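/*
 * Set up this CPU's timebase handlers and base timebase; see the comment
 * below for how the wake-from-sleep offset is derived.
 */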
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();
	uint64_t timebase_offset = 0;

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
#if DEBUG || DEVELOPMENT
		if (PE_parse_boot_argn("timebase_offset", &timebase_offset, sizeof(timebase_offset))) {
			rtclock_base_abstime += timebase_offset;
		}
#endif
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = timebase_offset;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

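/*
 * Final CPU-side step of system sleep. The boot CPU waits for all other CPUs
 * to reach the sleep path, records wake_abstime, optionally writes the
 * classic S2R sleep token and the hibernation image, then calls
 * arm64_prepare_for_sleep(); secondary CPUs clean their caches and do the
 * same. Does not return.
 */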
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoC
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab the debug lock (EDLAR) and clear bit 0 in EDPRCR to tell
		 * the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				/**
				 * Drain serial FIFOs now as the normal call further down won't
				 * be hit when the CPU halts here for hibernation. Here, it'll
				 * make sure the preceding HIBLOG is flushed as well.
				 */
				serial_go_to_sleep();
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

		serial_go_to_sleep();

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Apart from a few CPU revisions, our ARM64 CPUs have a global
		 * register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab the debug lock (EDLAR) and clear bit 0 in EDPRCR to tell
		 * the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys((vm_offset_t)&LowResetVectorBase));
#else
		arm64_prepare_for_sleep(true);
#endif
	}
}

void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe_masked = ((wfe_mode & 2) == 2);
			extern uint32_t idle_proximate_io_wfe_unmasked;
			idle_proximate_io_wfe_unmasked = ((wfe_mode & 4) == 4);
		}
#endif

		// bits 7..0 give the wfi type
		switch (wfi & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay in us
			// 15..8  - flags
			// 7..0   - 2
			// Decode the flags and delay before collapsing wfi to its type.
			wfi_flags = (wfi >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			wfi = 2;
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}
		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}
		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
	;
#endif
	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

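/* Number of CPUs currently sitting in the platform idle handler; maintained
 * by machine_track_platform_idle() below. */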
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif
