1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
33 #include <i386/mp.h>
34 #include <i386/cpu_data.h>
35 #if CONFIG_MTRR
36 #include <i386/mtrr.h>
37 #endif
38 #if HYPERVISOR
39 #include <kern/hv_support.h>
40 #endif
41 #if CONFIG_VMX
42 #include <i386/vmx/vmx_cpu.h>
43 #endif
44 #include <i386/ucode.h>
45 #include <i386/acpi.h>
46 #include <i386/fpu.h>
47 #include <i386/lapic.h>
48 #include <i386/mp.h>
49 #include <i386/mp_desc.h>
50 #include <i386/serial_io.h>
51 #if CONFIG_MCA
52 #include <i386/machine_check.h>
53 #endif
54 #include <i386/pmCPU.h>
55
56 #include <i386/tsc.h>
57
58 #define UINT64 uint64_t
59 #define UINT32 uint32_t
60 #define UINT16 uint16_t
61 #define UINT8 uint8_t
62 #define RSDP_VERSION_ACPI10 0
63 #define RSDP_VERSION_ACPI20 2
64 #include <acpi/Acpi.h>
65 #include <acpi/Acpi_v1.h>
66 #include <pexpert/i386/efi.h>
67
68 #include <kern/cpu_data.h>
69 #include <kern/machine.h>
70 #include <kern/monotonic.h>
71 #include <kern/timer_queue.h>
72 #include <console/serial_protos.h>
73 #include <machine/pal_routines.h>
74 #include <vm/vm_page.h>
75
76 #if HIBERNATION
77 #include <IOKit/IOHibernatePrivate.h>
78 #endif
79 #include <IOKit/IOPlatformExpert.h>
80 #include <sys/kdebug.h>
81
82 #if KPERF
83 #include <kperf/kptimer.h>
84 #endif /* KPERF */
85
86 #if CONFIG_SLEEP
87 extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
88 extern void acpi_wake_prot(void);
89 #endif
90 extern kern_return_t IOCPURunPlatformQuiesceActions(void);
91 extern kern_return_t IOCPURunPlatformActiveActions(void);
92 extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);
93
94 extern void fpinit(void);
95
96 #if DEVELOPMENT || DEBUG
97 #define DBG(x...) kprintf(x)
98 #else
99 #define DBG(x...)
100 #endif
101
/*
 * Install the ACPI wake vector: copy the real-mode bootstrap code into
 * low memory with acpi_wake_prot as its protected-mode entry point, and
 * return the physical offset of the bootstrap for the platform to use
 * as the wake address.  Returns 0 when sleep support is not configured.
 */
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}
112
113 #if CONFIG_SLEEP
114
/* kdebug tracing state saved across sleep/deep idle; tracing is disabled
 * while asleep and restored from this on wake. */
unsigned int save_kdebug_enable = 0;
/* Absolute timestamps bracketing the sleep (S3) and deep-idle (S0) paths. */
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
/* Wake timestamp, and wake timestamp taken after nanotime rebase. */
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
/* When TRUE, nanotime is rebased over deep idle so time appears not to
 * progress; overridable via sysctl -w tsc.deep_idle_rebase=0. */
boolean_t deep_idle_rebase = TRUE;
120
121 #if HIBERNATION
/* Bundles the ACPI platform sleep callback and its argument so both can
 * be passed through acpi_sleep_cpu() to acpi_hibernate() as one refcon. */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;       /* platform sleep routine to invoke */
	void *refcon;                   /* opaque argument handed to func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
127
/*
 * Sleep-path callback used when hibernation is configured.  If this CPU
 * is marked for hibernation, write the hibernate image and then either
 * power off, restart, or fall through to a normal platform sleep
 * depending on the post-write mode.  Finally suspend VMX (if built),
 * disable tracing, quiesce the platform, timestamp the sleep, and call
 * the real ACPI sleep function — which does not return on success.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
175 #endif /* HIBERNATION */
176 #endif /* CONFIG_SLEEP */
177
178 extern void slave_pstart(void);
179
/*
 * Put the system to sleep (S3) or hibernate, then restore processor
 * state on wake.  Runs on the boot processor; all other CPUs are first
 * forced to the OFF state.  Does not return until the platform wakes
 * (or sleep fails).
 *
 * func/refcon: the ACPI platform sleep routine and its argument.  On
 * the hibernate path these are packaged into `data' and invoked from
 * acpi_hibernate() after the image is written.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown(true);

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

#if CONFIG_CPU_COUNTERS
	mt_cpu_down(cdp);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Tracing is off across sleep; restored from this saved value on wake. */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* --- Execution resumes here after wake. --- */
	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (false == disable_serial_output) {
		pal_serial_init();
	}

#if HIBERNATION
	/* cpu_hibernate still set here means we came back via hibernate resume. */
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode and apply CPU workarounds */
	ucode_update_wake_and_apply_cpu_was();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure(true);
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

	/* Accumulate time since the post-rebase wake timestamp for the
	 * DBG_FUNC_END tracepoint emitted below. */
	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* Tracing was off before sleep: give kdebug_wake() a chance to
	 * enable it (e.g. trace_wake boot-arg) and account its cost. */
	if (kdebug_enable == 0) {
		elapsed_trace_start += kdebug_wake();
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();


#if HYPERVISOR
	/* Notify hypervisor that we are about to resume */
	hv_resume();
#endif

	IOCPURunPlatformActiveActions();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if CONFIG_CPU_COUNTERS
	mt_cpu_up(cdp);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_curcpu_up();
#endif /* KPERF */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}
392
/*
 * Machine-layer hook run before hibernate resume becomes active:
 * rebuild the VM structures needed to restore the hibernate image.
 * No-op when hibernation is not configured.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
400
/*
 * Machine-layer hook run after hibernate resume is active: perform
 * machine-level hibernate initialization (bracketed by kdebug
 * tracepoints) and clear the per-cpu hibernate flag.  No-op when
 * hibernation is not configured or this CPU was not hibernating.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
413
414 /*
415 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
416 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
417 * processors are expected already to have been offlined in the deepest C-state.
418 *
419 * The contract with ACPI is that although the kernel is called with interrupts
420 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
421 * interrupt. However, the callback function will be called once this has
422 * occurred and interrupts are guaranteed to be disabled at that time,
423 * and to remain disabled during C-state entry, exit (wake) and return
424 * from acpi_idle_kernel.
425 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	/* Only the boot processor may enter S0 deep idle. */
	assert(cpu_number() == master_cpu);

#if CONFIG_CPU_COUNTERS
	mt_cpu_down(cpu_datap(0));
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Per the contract above: if a timer (or PMC) interrupt is still
	 * pending in the local APIC, briefly re-enable interrupts so it
	 * can be dismissed, then re-cancel and re-disable.  We leave this
	 * loop with interrupts disabled and no timer interrupt pending.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if CONFIG_CPU_COUNTERS
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* CONFIG_CPU_COUNTERS */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overriden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

#if CONFIG_CPU_COUNTERS
	mt_cpu_up(cpu_datap(0));
#endif /* CONFIG_CPU_COUNTERS */

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		kdebug_wake();
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
522
523 extern char real_mode_bootstrap_end[];
524 extern char real_mode_bootstrap_base[];
525
/*
 * Install `prot_entry' as the protected-mode entry point reached via the
 * real-mode bootstrap vector.  Used both for the ACPI wake path
 * (acpi_wake_prot) and for slave CPU startup (slave_pstart).
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}
551
552 boolean_t
ml_recent_wake(void)553 ml_recent_wake(void)
554 {
555 uint64_t ctime = mach_absolute_time();
556 assert(ctime > acpi_wake_postrebase_abstime);
557 return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
558 }
559
/*
 * Compute the 8-bit additive checksum of `size' bytes at `ptr'.
 * ACPI structures are considered valid when the byte sum of the whole
 * structure, modulo 256, is zero — i.e. when this returns 0.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t total = 0;

	for (uint32_t idx = 0; idx < size; idx++) {
		total += ptr[idx];
	}

	return total;
}
572
573 /*
574 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
575 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
576 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
577 * physical.
578 */
/*
 * entry_type is the width of each TableOffsetEntry element: UINT32 for an
 * RSDT, UINT64 for an XSDT.  The macro expands to `return' statements, so
 * it must be used as the entire body of a function returning
 * ACPI_TABLE_HEADER * (see acpi_find_table_via_{rsdt,xsdt} below).
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
 \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
 \
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
 \
	return NULL; \
}
607
/* Search an XSDT (64-bit table pointers) for `signature'; returns the
 * checksummed table header, or NULL if absent or invalid. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64);
}
613
/* Search an RSDT (32-bit table pointers) for `signature'; returns the
 * checksummed table header, or NULL if absent or invalid. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32);
}
619
620 /*
621 * Returns a pointer to an ACPI table header corresponding to the table
622 * whose signature is passed in, or NULL if no such table could be found.
623 */
static ACPI_TABLE_HEADER *
acpi_find_table(uintptr_t rsdp_physaddr, const char *signature)
{
	/*
	 * The RSDP and the RSDT/XSDT derived from it are located and
	 * validated once, then cached in these statics for later lookups.
	 * A failed validation resets the relevant pointer so the next
	 * call retries from scratch.
	 */
	static RSDP_DESCRIPTOR *rsdp = NULL;
	static XSDT_DESCRIPTOR *xsdtp = NULL;
	static RSDT_DESCRIPTOR *rsdtp = NULL;

	if (signature == NULL) {
		DBG("Invalid NULL signature passed to acpi_find_table\n");
		return NULL;
	}

	/*
	 * RSDT or XSDT is required; without it, we cannot locate other tables.
	 */
	if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) {
		rsdp = PHYSMAP_PTOV(rsdp_physaddr);

		/* Verify RSDP signature */
		if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) {
			DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
			rsdp = NULL;
			return NULL;
		}

		/* Verify RSDP checksum.
		 * NOTE(review): this sums sizeof(RSDP_DESCRIPTOR) bytes even for
		 * ACPI 1.0 RSDPs, whose checksum covers only the first 20 bytes —
		 * confirm against the ACPI spec / RSDP_DESCRIPTOR layout. */
		if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) {
			DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
			    (unsigned long)rsdp_physaddr);
			rsdp = NULL;
			return NULL;
		}

		/* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
		if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) {
			DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
			rsdp = NULL;
			return NULL;
		} else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) {
			/* XSDT (with 64-bit pointers to tables) */
			rsdtp = NULL;
			xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress);
			if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) {
				DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
				    (unsigned long)rsdp->XsdtPhysicalAddress);
				xsdtp = NULL;
				return NULL;
			}
		} else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) {
			DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
			rsdp = NULL;
			return NULL;
		} else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) {
			/* RSDT (with 32-bit pointers to tables) */
			xsdtp = NULL;
			rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress);
			if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) {
				DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
				    (unsigned long)rsdp->RsdtPhysicalAddress);
				rsdtp = NULL;
				return NULL;
			}
		} else {
			DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
			    rsdp->Revision);
			rsdp = NULL;
			return NULL;
		}
	}

	/* At this point exactly one of the cached *SDT pointers is set. */
	assert(xsdtp != NULL || rsdtp != NULL);

	if (__probable(xsdtp != NULL)) {
		return acpi_find_table_via_xsdt(xsdtp, signature);
	} else if (rsdtp != NULL) {
		return acpi_find_table_via_rsdt(rsdtp, signature);
	}

	return NULL;
}
704
705 /*
706 * Returns the count of enabled logical processors present in the ACPI
707 * MADT, or 0 if the MADT could not be located.
708 */
709 uint32_t
acpi_count_enabled_logical_processors(void)710 acpi_count_enabled_logical_processors(void)
711 {
712 MULTIPLE_APIC_TABLE *madtp;
713 void *end_ptr;
714 APIC_HEADER *next_apic_entryp;
715 uint32_t enabled_cpu_count = 0;
716 uint64_t rsdp_physaddr;
717
718 rsdp_physaddr = efi_get_rsdp_physaddr();
719 if (__improbable(rsdp_physaddr == 0)) {
720 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
721 return 0;
722 }
723
724 madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT);
725
726 if (__improbable(madtp == NULL)) {
727 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
728 return 0;
729 }
730
731 end_ptr = (void *)((uintptr_t)madtp + madtp->Length);
732 next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE));
733
734 while ((void *)next_apic_entryp < end_ptr) {
735 switch (next_apic_entryp->Type) {
736 case APIC_PROCESSOR:
737 {
738 MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp;
739 if (madt_procp->ProcessorEnabled) {
740 enabled_cpu_count++;
741 }
742
743 break;
744 }
745
746 default:
747 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type,
748 next_apic_entryp->Length);
749 break;
750 }
751
752 next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length);
753 }
754
755 return enabled_cpu_count;
756 }
757