1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * File: arm/cpu.c
30 *
31 * cpu specific routines
32 */
33
34 #include <kern/kalloc.h>
35 #include <kern/machine.h>
36 #include <kern/cpu_number.h>
37 #include <kern/percpu.h>
38 #include <kern/thread.h>
39 #include <kern/timer_queue.h>
40 #include <arm/cpu_data.h>
41 #include <arm/cpuid.h>
42 #include <arm/caches_internal.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/cpu_internal.h>
45 #include <arm/misc_protos.h>
46 #include <arm/machine_cpu.h>
47 #include <arm/rtclock.h>
48 #include <arm/proc_reg.h>
49 #include <mach/processor_info.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_kern.h>
52 #include <vm/vm_map.h>
53 #include <pexpert/arm/board_config.h>
54 #include <pexpert/arm/protos.h>
55 #include <sys/kdebug.h>
56
57 #include <machine/atomic.h>
58
59 #if KPC
60 #include <kern/kpc.h>
61 #endif
62
/* Assembly entry points (see the reset handler code); their physical
 * addresses are computed in cpu_machine_idle_init(). */
extern unsigned int resume_idle_cpu;
extern unsigned int start_cpu;

/* Physical address of start_cpu; installed as the reset handler for
 * CPUs going to sleep (cpu_sleep) or being started (cpu_start). */
unsigned int start_cpu_paddr;

extern boolean_t idle_enable;
extern unsigned int real_ncpus;
extern uint64_t wake_abstime;

/* Location of the WFI instruction and the value used to patch it out
 * (0xe1a00000 is the ARM "mov r0, r0" NOP encoding) when wfi=0. */
extern void* wfi_inst;
unsigned wfi_fast = 1;          /* cleared when wfi=2; passed to cpu_idle_wfi() */
unsigned patch_to_nop = 0xe1a00000;

/* Kernel mapping of the low exception vectors page.  IOS_STATE is an
 * 8-byte signature slot at offset 0x80 within that page, written with
 * suspend_signature on sleep and running_signature on boot/wake. */
void *LowExceptionVectorsAddr;
#define IOS_STATE               (((vm_offset_t)LowExceptionVectorsAddr + 0x80))
#define IOS_STATE_SIZE          (0x08UL)
static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
81
/*
 *	Routine:	cpu_bootstrap
 *	Function:	Early per-CPU bootstrap hook; nothing to do on this
 *		platform.
 */
void
cpu_bootstrap(void)
{
}
90
91
/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare this CPU for system sleep: record the active
 *		thread, point the reset handler at start_cpu so the core
 *		comes back through the normal start path, clean the data
 *		cache, and hand off to the platform quiesce code.
 */
void
cpu_sleep(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;

	/* Clean the data cache to the point of coherency before quiesce. */
	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}
109
/* Count of CPUs currently in platform idle; maintained by
 * machine_track_platform_idle(). */
_Atomic uint32_t cpu_idle_count = 0;
111
/*
 *	Routine:	cpu_idle
 *	Function:	Idle-path body for this CPU: run the idle-entry
 *		notification, reprogram the idle timer if the notify hook
 *		requested one, wait for interrupt, then leave through
 *		cpu_idle_exit().  Never returns to the caller (all exits
 *		go through Idle_load_context()).
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	uint64_t       new_idle_timeout_ticks = 0x0ULL, lastPop;

	/* If idling is disabled (e.g. jtag boot-arg) or this CPU's signals
	 * are masked, bypass the WFI path entirely. */
	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		Idle_load_context();
	}
	if (!SetIdlePop()) {
		Idle_load_context();
	}
	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();

	/* TRUE => entering idle; the hook may ask for an idle timeout via
	 * new_idle_timeout_ticks. */
	if (cpu_data_ptr->cpu_idle_notify != NULL) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		/* Resync may have moved the next timer pop; re-arm it. */
		if (cpu_data_ptr->rtcPop != lastPop) {
			/*
			 * Ignore the return value here: this CPU has called idle_notify and
			 * committed to going idle.
			 */
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif /* KPC */

	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);
	cpu_idle_exit(FALSE);   /* does not return */
}
165
/*
 *	Routine:	cpu_idle_exit
 *	Function:	Leave the idle state: run the idle-exit notification,
 *		reprogram the idle timer if the hook requested one, then
 *		reload the idle context.  Never returns to the caller.
 */
void
cpu_idle_exit(boolean_t from_reset __unused)
{
	uint64_t       new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t     *cpu_data_ptr = getCpuDatap();

#if KPC
	kpc_idle_exit();
#endif
	/* FALSE => leaving idle. */
	if (cpu_data_ptr->cpu_idle_notify != NULL) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
196
/*
 *	Routine:	cpu_init
 *	Function:	Per-CPU initialization, called both at first bring-up
 *		and on wake.  The cpu_type guard makes the identification /
 *		timer-queue setup run exactly once per CPU; the tail (stat
 *		reset, sleep-token clear) runs on every call.
 */
void
cpu_init(void)
{
	cpu_data_t     *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	/* One-time init: cpu_type is only ever set here. */
	if (cdp->cpu_type != CPU_TYPE_ARM) {
		cdp->cpu_type = CPU_TYPE_ARM;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}

		do_cacheid();

		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv4T:
		case CPU_ARCH_ARMv5T:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V4T;
			break;
		case CPU_ARCH_ARMv5TE:
		case CPU_ARCH_ARMv5TEJ:
			/* Intel implementor ID => XScale, which gets its own
			 * subtype despite reporting an ARMv5TE/TEJ arch. */
			if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL) {
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
			} else {
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
			}
			break;
		case CPU_ARCH_ARMv6:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
			break;
		case CPU_ARCH_ARMv7:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
			break;
		case CPU_ARCH_ARMv7f:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7F;
			break;
		case CPU_ARCH_ARMv7s:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7S;
			break;
		case CPU_ARCH_ARMv7k:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7K;
			break;
		default:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	/* Runs on every call (including wake): reset the wake-relative
	 * stats, mark the CPU running, and clear the sleep token while
	 * preserving its previous value. */
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
}
269
/*
 *	Routine:	cpu_stack_alloc
 *	Function:	Allocate the per-CPU IRQ (interrupt) and FIQ
 *		(exception) stacks.  Each allocation adds 2 * PAGE_SIZE and
 *		requests KMA_GUARD_FIRST/LAST so the stack is bracketed by
 *		guard pages and an overflow faults instead of corrupting
 *		adjacent memory.  Panics on allocation failure.
 */
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t     irq_stack = 0;
	vm_offset_t     fiq_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack");
	}

	/* Stacks grow down: top sits above the usable range, past the
	 * leading guard page. */
	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &fiq_stack,
	    FIQSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack");
	}

	cpu_data_ptr->fiqstack_top = fiq_stack + PAGE_SIZE + FIQSTACK_SIZE;
	cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
}
300
/*
 *	Routine:	cpu_data_free
 *	Function:	Unregister a non-boot CPU (drop it from
 *		CpuDataEntries and decrement real_ncpus) and free its IRQ
 *		and FIQ stacks.  No-op for NULL or the boot CPU.
 */
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		OSDecrementAtomic((SInt32*)&real_ncpus);
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	/* NOTE(review): this frees only the INTSTACK_SIZE/FIQSTACK_SIZE
	 * stack proper; the guard pages added in cpu_stack_alloc() appear
	 * to remain — confirm whether that is intentional. */
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE);
}
319
/*
 *	Routine:	cpu_data_init
 *	Function:	Reset a cpu_data_t to its default pre-start state.
 *		The sleep token is set to ARM_CPU_ON_SLEEP_PATH here and is
 *		cleared by cpu_init() once the CPU actually comes up.
 */
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	cpu_data_ptr->cpu_flags = 0;
#if __arm__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;    /* no timer pop armed yet */
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;
	cpu_data_ptr->cpu_base_timebase_low = 0;
	cpu_data_ptr->cpu_base_timebase_high = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	/* Topology defaults; presumably refined later from platform data —
	 * confirm against the topology init path. */
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	/* Signals (IPIs) stay disabled until the CPU is fully up. */
	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

#if defined(ARMA7)
	/* CLW (cache clean/flush window) request bookkeeping; active by
	 * default, disabled on the sleep path (see ml_arm_sleep). */
	cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_last = 0x0ULL;
	cpu_data_ptr->cpu_CLW_active = 0x1UL;
#endif

#if !XNU_MONITOR
	pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}
384
/*
 *	Routine:	cpu_data_register
 *	Function:	Assign this CPU the next cpu number and publish its
 *		cpu_data (virtual and physical addresses) in CpuDataEntries.
 *		Returns KERN_FAILURE if the registration would exceed
 *		ml_get_cpu_count().
 */
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu;

	/* OSIncrementAtomic returns the pre-increment value, which becomes
	 * this CPU's number.  NOTE(review): on the failure path below the
	 * increment is not rolled back — confirm that is acceptable. */
	cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
	if (real_ncpus > ml_get_cpu_count()) {
		return KERN_FAILURE;
	}

	cpu_data_ptr->cpu_number = cpu;
	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}
401
/*
 *	Routine:	cpu_start
 *	Function:	Start a CPU.  For the calling CPU this is just
 *		cpu_machine_init(); for another CPU, install its reset
 *		handler and first thread, flush the relevant structures to
 *		memory, and ask the platform expert to power it on.
 */
kern_return_t
cpu_start(int cpu)
{
	kprintf("cpu_start() cpu: %d\n", cpu);
	if (cpu == cpu_number()) {
		cpu_machine_init();
		return KERN_SUCCESS;
	} else {
		cpu_data_t      *cpu_data_ptr;
		thread_t        first_thread;
		processor_t     processor;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;
#endif

		/* Prefer a pending startup thread; otherwise come up on the
		 * idle thread. */
		processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		/* Flush to memory so the target core — presumably not yet
		 * cache-coherent with us — sees the updated structures. */
		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
		return KERN_SUCCESS;
	}
}
438
/*
 *	Routine:	cpu_timebase_init
 *	Function:	Install the rtclock timebase accessor functions
 *		(first call only) and reset this CPU's decrementer and
 *		timebase, seeding the base timebase from the global
 *		rtclock base abstime.
 */
void
cpu_timebase_init(boolean_t from_boot __unused)
{
	cpu_data_t *cdp = getCpuDatap();

	/* One-time: cpu_get_fiq_handler doubles as the "already
	 * initialized" flag. */
	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}
	cdp->cpu_decrementer = 0x7FFFFFFFUL;    /* max 32-bit deadline */
	cdp->cpu_timebase_low = 0x0UL;
	cdp->cpu_timebase_high = 0x0UL;

#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
	/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
	 * are 32-bit. */
	cdp->cpu_base_timebase_low = rtclock_base_abstime_low;
	cdp->cpu_base_timebase_high = rtclock_base_abstime_high;
#else
	/* NOTE(review): single 64-bit store relies on cpu_base_timebase_low
	 * and _high being adjacent and suitably aligned — confirm layout. */
	*((uint64_t *) &cdp->cpu_base_timebase_low) = rtclock_base_abstime;
#endif
}
464
465
/*
 *	Routine:	ml_arm_sleep
 *	Function:	Final step of system sleep on each CPU.  The boot CPU
 *		waits for all other registered CPUs to reach the sleep path,
 *		records the wake-time abstime fixup, shuts down the caches,
 *		and writes the suspend signature; secondaries just disable
 *		and clean their caches.  All CPUs end in a WFE spin — this
 *		function never returns.
 */
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t      *target_cdp;
		unsigned int    cpu;

		/* Spin until every other registered CPU has set its sleep
		 * token, i.e. is committed to the sleep path.  A NULL entry
		 * terminates the scan. */
		const unsigned int max_cpu_id = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu_id; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
			if (target_cdp == (cpu_data_t *)NULL) {
				break;
			}

			if (target_cdp == cpu_data_ptr) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/* Now that the other cores have entered the sleep path, set
		 * the abstime fixup we'll use when we resume.*/
		rtclock_base_abstime = ml_get_timebase();
		wake_abstime = rtclock_base_abstime;
	} else {
		platform_cache_disable();
		CleanPoU_Dcache();
	}
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
#if defined(ARMA7)
	/* Cancel outstanding CLW requests; the DMB orders those stores
	 * before marking the CLW machinery inactive. */
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	__builtin_arm_dmb(DMB_ISH);
	cpu_data_ptr->cpu_CLW_active = 0;
#endif
	if (cpu_data_ptr == &BootCpuData) {
		platform_cache_disable();
		platform_cache_shutdown();
		/* Mark the low-vector state slot "suspended" — presumably
		 * consumed by firmware/resume code; confirm the consumer. */
		bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
	} else {
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
	}

	/* Make everything above globally visible, then wait here until the
	 * core loses power or is reset. */
	__builtin_arm_dsb(DSB_SY);
	while (TRUE) {
		__builtin_arm_wfe();
	} /* Spin */
}
520
/* "wfi" boot-arg: 0 = patch the WFI instruction to a NOP (never wait),
 * 1 = default (fast WFI), 2 = non-fast WFI (clears wfi_fast). */
TUNABLE(unsigned int, wfi, "wfi", 1);
522
/*
 *	Routine:	cpu_machine_idle_init
 *	Function:	Set up the idle/resume machinery.  On the from_boot
 *		call: decide idle enablement (jtag boot-arg), apply the
 *		"wfi" boot-arg patches, copy the exception vectors to the
 *		low page, and patch the physical ResetHandlerData with the
 *		physical addresses of BootArgs and CpuDataEntries so the
 *		reset handler can find them with the MMU off.  On every
 *		call: point this CPU's reset handler at resume_idle_cpu and
 *		clean its cpu_data to memory.
 */
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static const unsigned int       *BootArgs_paddr = (unsigned int *)NULL;
	static const unsigned int       *CpuDataEntries_paddr = (unsigned int *)NULL;
	static unsigned int             resume_idle_cpu_paddr = (unsigned int)NULL;
	cpu_data_t                      *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned int    jtag = 0;

		/* "jtag" boot-arg disables idling (keeps cores awake for a
		 * hardware debugger). */
		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

		/* wfi=0: overwrite the WFI instruction with a NOP via its
		 * physical address; wfi=2: select the slow WFI variant. */
		if (wfi == 0) {
			bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
			    (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
		}
		if (wfi == 2) {
			wfi_fast = 0;
		}

		LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

		/* Copy Exception Vectors low, but don't touch the sleep token
		 * (the 0x90..0xA0 region is deliberately skipped). */
		bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
		bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)), ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)), ARM_PGBYTES - 0xA0);

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

		/* Write the 4-byte physical address of BootArgs into the
		 * physical copy of ResetHandlerData.boot_args. */
		BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
		    (addr64_t)((unsigned int)(gPhysBase) +
		    ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
		    4);

		/* Same for CpuDataEntries into ResetHandlerData.cpu_data_entries. */
		CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
		    (addr64_t)((unsigned int)(gPhysBase) +
		    ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
		    4);

		CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE);

		resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

	if (cpu_data_ptr == &BootCpuData) {
		/* Mark the low-vector state slot as "running". */
		bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
	}
	;

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
585
586 void
machine_track_platform_idle(boolean_t entry)587 machine_track_platform_idle(boolean_t entry)
588 {
589 if (entry) {
590 os_atomic_inc(&cpu_idle_count, relaxed);
591 } else {
592 os_atomic_dec(&cpu_idle_count, relaxed);
593 }
594 }
595