/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#include <kern/kpc.h>
#if CONFIG_CPU_COUNTERS
#include <kern/monotonic.h>
#endif /* CONFIG_CPU_COUNTERS */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */


#include <libkern/section_keywords.h>

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif

#if !CONFIG_SPTM
extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;
vm_address_t start_cpu_paddr;
#endif

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if APPLEVIRTUALPLATFORM
extern vm_offset_t reset_vector_vaddr;
#endif /* APPLEVIRTUALPLATFORM */

#if APPLEVIRTUALPLATFORM
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep, unsigned int cpu, uint64_t entry_pa);
#else
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
#endif
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

// wfi - wfi mode
//    0 : disabled
//    1 : normal
//    2 : overhead simulation (delay & flags)
TUNABLE(unsigned int, wfi, "wfi", 1);
#if DEVELOPMENT || DEBUG

// wfi_flags
//    1 << 0 : flush L1s
//    1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */
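
/*
 * Note: the "wfi" boot-arg is decoded in cpu_machine_idle_init() below: bits
 * 7..0 select the wfi mode and, on DEVELOPMENT || DEBUG kernels, bits 15..8
 * carry wfi_flags while bits 31..16 carry the wfi_delay value.
 * Purely illustrative (hypothetical) example: wfi=0x00640102 selects mode 2
 * with the "flush L1s" flag and a delay field of 0x64.
 */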

#define CPUPM_IDLE_WFE 0x5310300
#define CPUPM_IDLE_TIMER_WFE 0x5310304

#define DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC (60ULL)
TUNABLE(uint32_t, expecting_ipi_wfe_timeout_usec,
    "expecting_ipi_wfe_timeout_usec", DEFAULT_EXPECTING_IPI_WFE_TIMEOUT_USEC);
uint64_t expecting_ipi_wfe_timeout_mt = 0x0ULL; /* initialized to a non-zero value in sched_init */

/* When recommended, issue WFE with [FI]IRQ unmasked in the idle
 * loop. The default.
 */
uint32_t idle_proximate_io_wfe_unmasked = 1;
#if DEVELOPMENT || DEBUG
uint32_t idle_proximate_timer_wfe = 1;
uint32_t idle_proximate_io_wfe_masked = 0;
#else
/* Issue WFE in lieu of WFI when awaiting a proximate timer. */
static uint32_t idle_proximate_timer_wfe = 1;
/* When recommended, issue WFE with [FI]IRQ masked in the idle loop.
 * Non-default, retained for experimentation.
 */
static uint32_t idle_proximate_io_wfe_masked = 0;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

MACHINE_TIMEOUT(arm64_ipi_test_timeout, "arm64-ipi-test", 100, MACHINE_TIMEOUT_UNIT_MSEC, NULL);

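/*
 * Basic cross-call self-test: for every CPU up to ml_get_max_cpu_number(),
 * issue a normal and an immediate IPI and spin until both callbacks have
 * written their expected values into arm64_ipi_test_data[], panicking if the
 * "arm64-ipi-test" machine timeout expires first.
 */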
void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint64_t timeout_ms = os_atomic_load(&arm64_ipi_test_timeout, relaxed);
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Start timing before the first immediate-xcall attempt so the
		 * retry loop below has a valid reference point.
		 */
		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %lldms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %lldms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

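/*
 * Map (if needed) and unlock this CPU's CoreSight debug register regions,
 * using either the legacy cpu_regmap_paddr or the coresight_regs address from
 * the CPU topology info. CTI and PMU regions are skipped entirely, and the
 * ED/UTT regions are only touched when coresight_debug_enabled is set.
 */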
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	assert(cdp);
	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI || i == CORESIGHT_PMU) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}
			}
			/* Unlock EDLAR (CTI and PMU are skipped above). */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 *	Routine:	cpu_bootstrap
 *	Function:	Early CPU bootstrap hook; currently a no-op on arm64.
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare the current CPU for sleep and hand control to the
 *			platform layer; this routine does not return.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->cpu_active_thread = current_thread();
#if CONFIG_SPTM
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) VM_KERNEL_STRIP_PTR(arm_init_cpu);
#else
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
#endif
	os_atomic_or(&cpu_data_ptr->cpu_flags, SleepState, relaxed);

	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle();
	mt_cpu_down(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(deep_sleep, cpu_data_ptr->cpu_number, ml_vtophys(reset_vector_vaddr));
#else /* APPLEVIRTUALPLATFORM */
		arm64_prepare_for_sleep(deep_sleep);
#endif /* APPLEVIRTUALPLATFORM */
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 *	Routine:	cpu_interrupt_is_pending
 *	Function:	Returns a bool signifying a non-zero ISR_EL1,
 *			indicating a pending IRQ, FIQ or external abort.
 */

bool
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return isr_value != 0;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

#ifdef ARM64_BOARD_CONFIG_T6000
int wfe_allowed = 0;
#else
int wfe_allowed = 1;
#endif /* ARM64_BOARD_CONFIG_T6000 */

#if DEVELOPMENT || DEBUG
#define WFE_STAT(x) \
	do { \
		(x); \
	} while(0)
#else
#define WFE_STAT(x) do {} while(0)
#endif /* DEVELOPMENT || DEBUG */

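/*
 * Spin in a WFE loop until an interrupt is pending or the WFE recommendation
 * expires. wfe_deadline is an absolute-time bound (~0ULL means no deadline),
 * unmask selects whether WFE is issued with IRQ/FIQ unmasked, and
 * check_cluster_recommendation re-consults ml_cluster_wfe_timeout() for
 * cluster cid on each deadline check. Returns true if an interrupt was
 * observed as pending, or was already handled while running unmasked.
 */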
bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, cpu_data_t *cdp, bool unmask, bool check_cluster_recommendation)
{
	bool ipending = false;
	uint64_t irqc = 0, nirqc = 0;

	/* The ARMv8 architecture permits a processor dwelling in WFE
	 * with F/IRQ masked to ignore a pending interrupt, i.e.
	 * not classify it as an 'event'. This is potentially
	 * problematic with AICv2's IRQ distribution model, as
	 * a transient interrupt masked interval can cause an SIQ
	 * query rejection, possibly routing the interrupt to
	 * another core/cluster in a powergated state.
	 * Hence, optionally unmask IRQs+FIQs across WFE.
	 */
	if (unmask) {
		/* Latch SW IRQ+FIQ counter prior to unmasking
		 * interrupts.
		 */
		irqc = nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
		__builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE);
	}

	while ((ipending = (cpu_interrupt_is_pending())) == false) {
		if (unmask) {
			/* If WFE was issued with IRQs unmasked, an
			 * interrupt may have been processed.
			 * Consult the SW IRQ counter to determine
			 * whether the 'idle loop' must be
			 * re-evaluated.
			 */
			nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
			if (nirqc != irqc) {
				break;
			}
		}

		if (__probable(wfe_allowed)) {
			/*
			 * If IRQs are unmasked, there's a small window
			 * where an 'extra' WFE may be issued after
			 * the consultation of the SW interrupt counter
			 * and new interrupt arrival. Hence this WFE
			 * relies on the [FI]RQ interrupt handler
			 * epilogue issuing a 'SEVL', to post an
			 * event which causes the next WFE on the same
			 * PE to retire immediately.
			 */

			__builtin_arm_wfe();
		}

		WFE_STAT(cdp->wfe_count++);
		if (wfe_deadline != ~0ULL) {
			WFE_STAT(cdp->wfe_deadline_checks++);
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((check_cluster_recommendation && ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
				WFE_STAT(cdp->wfe_terminations++);
				break;
			}
		}
	}

	if (unmask) {
		__builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE);
		/* Refetch the SW interrupt counter with IRQs masked.
		 * It is important that this routine accurately flags
		 * any observed interrupts via its return value, as
		 * inaccuracy may lead to an erroneous WFI fallback.
		 */
		nirqc = os_atomic_load(&cdp->cpu_stat.irq_ex_cnt_wake, relaxed);
	}

	return ipending || (nirqc != irqc);
}

/*
 *	Routine:	cpu_idle
 *	Function:	Low-level idle entry point; issues WFE/WFI as appropriate
 *			and returns to the idle thread via Idle_load_context().
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	processor_t processor = current_processor();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;
	/* Read and reset the next_idle_short flag */
	bool next_idle_short = processor->next_idle_short;
	processor->next_idle_short = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = cpu_data_ptr->cpu_cluster_id;
	bool check_cluster_recommendation = true;
	uint64_t wfe_timeout = 0;

	if (idle_proximate_io_wfe_masked == 1) {
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		wfe_timeout = ml_cluster_wfe_timeout(cid);
	}

	if (next_idle_short && expecting_ipi_wfe_timeout_mt > wfe_timeout) {
		/* In this case we should WFE because a response IPI
		 * is expected soon.
		 */
		wfe_timeout = expecting_ipi_wfe_timeout_mt;
		check_cluster_recommendation = false;
	}

	if (wfe_timeout != 0) {
		uint64_t wfe_deadline = mach_absolute_time() + wfe_timeout;
		/* Poll issuing event-bounded WFEs until an interrupt
		 * arrives or the WFE recommendation expires
		 */
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, wfe_timeout, !check_cluster_recommendation);
		ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr, false, check_cluster_recommendation);
		KDBG(CPUPM_IDLE_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, wfe_deadline);
		if (ipending == true) {
			/* Back to machine_idle() */
			Idle_load_context();
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == 1) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_START, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr, false, false);
			KDBG(CPUPM_IDLE_TIMER_WFE | DBG_FUNC_END, ipending, cpu_data_ptr->wfe_count, ~0ULL);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle();
	mt_cpu_idle(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 *	Routine:	cpu_idle_exit
 *	Function:	Exit the idle loop (or return from reset when from_reset is
 *			TRUE), re-arm the idle timer and resume the idle thread.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if CONFIG_CPU_COUNTERS
	kpc_idle_exit();
	mt_cpu_run(cpu_data_ptr);
#endif /* CONFIG_CPU_COUNTERS */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

#if KASAN_TBI
	kasan_unpoison_curstack(false);
#endif /* KASAN_TBI */

	Idle_load_context();
}

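/*
 * Per-CPU initialization run each time a CPU comes up (cold boot, hotplug or
 * wake): establishes cpu_type/cpu_subtype on first use, clears the wake-time
 * interrupt statistics and marks the CPU as running.
 */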
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}

		do_cacheid();

		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if CONFIG_CPU_COUNTERS
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* CONFIG_CPU_COUNTERS */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if CONFIG_CPU_COUNTERS
	kpc_idle_exit();
	mt_cpu_up(cdp);
#endif /* CONFIG_CPU_COUNTERS */
}

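/*
 * Allocate the per-CPU interrupt and exception stacks. Each allocation
 * reserves two extra pages for leading and trailing guard pages, so the
 * usable stack top sits at base + PAGE_SIZE + <stack size>.
 */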
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kmem_alloc(kernel_map, &irq_stack,
	    INTSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = (void *)cpu_data_ptr->intstack_top;

	kmem_alloc(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + ptoa(2), KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = (void *)cpu_data_ptr->excepstack_top;
}

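/*
 * Reset a cpu_data_t to its default, not-yet-started state: timers, reset
 * handler and xcall slots cleared, signals disabled, coresight bases zeroed
 * and (when not building for XNU_MONITOR) the per-CPU pmap data reinitialized.
 */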
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	os_atomic_store(&cpu_data_ptr->cpu_flags, 0, relaxed);
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_spill_marker = NULL;
#if !CONFIG_SPTM
	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif
#endif /* !XNU_MONITOR */
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}

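/*
 * Publish a CPU's cpu_data in CpuDataEntries (both the virtual and physical
 * address), so early bring-up and reset-handler code can locate it; the DMB
 * ensures prior initialization of the structure is visible first.
 */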
void
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif

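/*
 * Start (or re-initialize) the given CPU. For the calling CPU this only runs
 * cpu_machine_init(); for any other CPU it installs the reset handler and
 * first thread, serializes CTRR cluster locking where applicable, and then
 * asks the platform layer to power the core on.
 */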
void
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);

	if (processor_should_kprintf(processor, true)) {
		kprintf("cpu_start() cpu: %d\n", cpu);
	}

	if (cpu == cpu_number()) {
		/* Current CPU is already running, just needs initialization */
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
#if CONFIG_SPTM
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) VM_KERNEL_STRIP_PTR(arm_init_cpu);
#else
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif
#endif /* !CONFIG_SPTM */

		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base_and_cpu_number =
		    ml_make_pcpu_base_and_cpu_number((vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data),
		    cpu_data_ptr->cpu_number);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if CONFIG_SPTM
		/**
		 * On SPTM devices, CTRR is configured entirely by the SPTM. Due to this, this logic
		 * is no longer required in XNU.
		 */
#else
#if defined(KERNEL_INTEGRITY_CTRR)

		/* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
		 * other CPUs block until cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:

			lck_spin_sleep(&ctrr_cpu_start_lck, LCK_SLEEP_UNLOCK,
			    &ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id],
			    THREAD_UNINT | THREAD_WAIT_NOREPORT);

			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
#endif /* CONFIG_SPTM */

		PE_cpu_start_internal(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);

	}
}


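/*
 * Initialize this CPU's view of the timebase. On wake from sleep the boot CPU
 * re-derives rtclock_base_abstime from wake_abstime so that
 * mach_absolute_time() appears to stop ticking across sleep, regardless of
 * whether the hardware timebase kept counting or was reset.
 */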
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();
	uint64_t timebase_offset = 0;

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData) && ml_is_quiescing()) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
#if DEBUG || DEVELOPMENT
		if (PE_parse_boot_argn("timebase_offset", &timebase_offset, sizeof(timebase_offset))) {
			rtclock_base_abstime += timebase_offset;
		}
#endif
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = timebase_offset;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

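/*
 * Final sleep entry point. The boot CPU waits for all other CPUs to reach the
 * sleep path, records wake_abstime, writes the classic S2R suspend signature
 * where configured, releases the global sleep stall (where present),
 * optionally writes the hibernation image, and then calls
 * arm64_prepare_for_sleep(); secondary CPUs stall until released, clean their
 * cpu_data and enter arm64_prepare_for_sleep() directly.
 */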
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				/**
				 * Drain serial FIFOs now as the normal call further down won't
				 * be hit when the CPU halts here for hibernation. Here, it'll
				 * make sure the preceding HIBLOG is flushed as well.
				 */
				serial_go_to_sleep();
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

		serial_go_to_sleep();

#if CONFIG_CPU_COUNTERS
		mt_sleep();
#endif /* CONFIG_CPU_COUNTERS */
		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		extern bool test_sleep_in_vm;
		if (test_sleep_in_vm) {
			/*
			 * Until sleep is supported on APPLEVIRTUALPLATFORM, use this
			 * trick for testing sleep - just jump straight to the CPU resume point.
			 */
			arm_init_cpu(cpu_data_ptr, 0);
		}
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys(reset_vector_vaddr));
#else /* APPLEVIRTUALPLATFORM */
		arm64_prepare_for_sleep(true);
#endif /* APPLEVIRTUALPLATFORM */
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
#if APPLEVIRTUALPLATFORM
		arm64_prepare_for_sleep(true, cpu_data_ptr->cpu_number, ml_vtophys(reset_vector_vaddr));
#else /* APPLEVIRTUALPLATFORM */
		arm64_prepare_for_sleep(true);
#endif /* APPLEVIRTUALPLATFORM */
	}
}

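/*
 * Idle/sleep machinery setup. On the boot pass this parses the "jtag" and
 * "wfi" boot-args (plus "wfe_mode" on DEVELOPMENT || DEBUG kernels), fills in
 * the reset handler data where applicable and determines whether the part is
 * debug-fused; on every pass it points the CPU's reset handler at the
 * idle-resume entry and cleans the cpu_data to memory.
 */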
void
cpu_machine_idle_init(boolean_t from_boot)
{
#if !CONFIG_SPTM
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
#endif
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe_masked = ((wfe_mode & 2) == 2);
			extern uint32_t idle_proximate_io_wfe_unmasked;
			idle_proximate_io_wfe_unmasked = ((wfe_mode & 4) == 4);
		}
#endif

		// bits 7..0 give the wfi type
		switch (wfi & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay in us
			// 15..8  - flags
			// 7..0   - 2
			// Decode the flags and delay before collapsing wfi to the bare mode value.
			wfi_flags = (wfi >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			wfi = 2;
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}
#if !CONFIG_SPTM
		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);
#endif

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}
#if !CONFIG_SPTM
		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
#endif
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif
#if CONFIG_SPTM
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) VM_KERNEL_STRIP_PTR(arm_init_idle_cpu);
#else
	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
#endif
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stram node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif
