1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
33 #include <i386/mp.h>
34 #include <i386/cpu_data.h>
35 #if CONFIG_MTRR
36 #include <i386/mtrr.h>
37 #endif
38 #if HYPERVISOR
39 #include <kern/hv_support.h>
40 #endif
41 #if CONFIG_VMX
42 #include <i386/vmx/vmx_cpu.h>
43 #endif
44 #include <i386/ucode.h>
45 #include <i386/acpi.h>
46 #include <i386/fpu.h>
47 #include <i386/lapic.h>
48 #include <i386/mp.h>
49 #include <i386/mp_desc.h>
50 #include <i386/serial_io.h>
51 #if CONFIG_MCA
52 #include <i386/machine_check.h>
53 #endif
54 #include <i386/pmCPU.h>
55
56 #include <i386/tsc.h>
57
58 #define UINT64 uint64_t
59 #define UINT32 uint32_t
60 #define UINT16 uint16_t
61 #define UINT8 uint8_t
62 #define RSDP_VERSION_ACPI10 0
63 #define RSDP_VERSION_ACPI20 2
64 #include <acpi/Acpi.h>
65 #include <acpi/Acpi_v1.h>
66 #include <pexpert/i386/efi.h>
67
68 #include <kern/cpu_data.h>
69 #include <kern/machine.h>
70 #include <kern/timer_queue.h>
71 #include <console/serial_protos.h>
72 #include <machine/pal_routines.h>
73 #include <vm/vm_page.h>
74
75 #if HIBERNATION
76 #include <IOKit/IOHibernatePrivate.h>
77 #endif
78 #include <IOKit/IOPlatformExpert.h>
79 #include <sys/kdebug.h>
80
81 #if MONOTONIC
82 #include <kern/monotonic.h>
83 #endif /* MONOTONIC */
84
85 #if KPERF
86 #include <kperf/kptimer.h>
87 #endif /* KPERF */
88
89 #if CONFIG_SLEEP
90 extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
91 extern void acpi_wake_prot(void);
92 #endif
93 extern kern_return_t IOCPURunPlatformQuiesceActions(void);
94 extern kern_return_t IOCPURunPlatformActiveActions(void);
95 extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);
96
97 extern void fpinit(void);
98
99 #if DEVELOPMENT || DEBUG
100 #define DBG(x...) kprintf(x)
101 #else
102 #define DBG(x...)
103 #endif
104
/*
 * Install the real-mode wake trampoline so that a wake from S3 resumes
 * execution at acpi_wake_prot.
 *
 * Returns the physical offset of the trampoline (REAL_MODE_BOOTSTRAP_OFFSET),
 * or 0 when the kernel is built without CONFIG_SLEEP.
 */
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}
115
116 #if CONFIG_SLEEP
117
/* kdebug enable state saved while tracing is disabled across sleep/deep idle */
unsigned int save_kdebug_enable = 0;
/* mach_absolute_time() recorded just before platform sleep */
static uint64_t acpi_sleep_abstime;
/* mach_absolute_time() recorded on entry to S0 deep idle */
static uint64_t acpi_idle_abstime;
/* wake timestamps: raw, and after the nanotime rebase has completed */
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
/* overridable via sysctl tsc.deep_idle_rebase (see acpi_idle_kernel) */
boolean_t deep_idle_rebase = TRUE;

#if HIBERNATION
/* Bundles the ACPI sleep callback and its refcon for acpi_hibernate(). */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
130
/*
 * Sleep-path callback used when hibernation is configured.
 *
 * If this CPU is marked for hibernation, writes the hibernate image and
 * then halts, restarts, or continues to sleep depending on the image
 * writer's disposition.  Afterwards it quiesces the platform and calls
 * through to the original ACPI sleep callback packed in refcon.
 *
 * refcon: an acpi_hibernate_callback_data_t carrying the caller's
 *         sleep callback and its refcon.
 *
 * Does not return: the final (data->func)(data->refcon) call transfers
 * control to firmware for sleep.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;  /* set only when this CPU is hibernating */

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

	/* VMX state must be torn down before handing control to firmware */
#if CONFIG_VMX
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
178 #endif /* HIBERNATION */
179 #endif /* CONFIG_SLEEP */
180
181 extern void slave_pstart(void);
182
/*
 * Sleep (S3) or hibernate the system.
 *
 * Runs on the boot processor; all other CPUs are first transitioned to
 * the OFF state.  Saves CPU and platform state, calls down into
 * ACPI/firmware via 'func' (which does not return until the platform
 * wakes), then restores CPU, timer, APIC, and tracing state on the wake
 * path before returning to the caller.
 *
 * func:   ACPI callback that performs the actual platform sleep.
 * refcon: opaque argument passed through to 'func'.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown(true);

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

	/* Stop per-CPU performance monitoring and kperf timers before sleep */
#if MONOTONIC
	mt_cpu_down(cdp);
#endif /* MONOTONIC */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Tracing is disabled across sleep; restored from save_kdebug_enable on wake */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* ---- execution resumes here on wake ---- */
	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (false == disable_serial_output) {
		pal_serial_init();
	}

	/* Determine whether we came back from a hibernate image or plain S3 */
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode and apply CPU workarounds */
	ucode_update_wake_and_apply_cpu_was();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure(true);
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* Turn tracing on if the trace_wake boot-arg requests it */
	if (kdebug_enable == 0) {
		elapsed_trace_start += kdebug_wake();
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();


#if HYPERVISOR
	/* Notify hypervisor that we are about to resume */
	hv_resume();
#endif

	IOCPURunPlatformActiveActions();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
#if KPERF
	kptimer_curcpu_up();
#endif /* KPERF */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}
395
/*
 * Hook called early on the hibernate-resume path; rebuilds the VM
 * structures that were torn down for the hibernate image (no-op when
 * the kernel is built without HIBERNATION).
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
403
/*
 * Hook called later on the hibernate-resume path; performs hibernate
 * machine init and clears this CPU's hibernate flag, bracketed by
 * DBG_HIBERNATE tracepoints.  No-op when not resuming from hibernation.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
416
417 /*
418 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
419 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
420 * processors are expected already to have been offlined in the deepest C-state.
421 *
422 * The contract with ACPI is that although the kernel is called with interrupts
423 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
424 * interrupt. However, the callback function will be called once this has
425 * occurred and interrupts are guaranteed to be disabled at that time,
426 * and to remain disabled during C-state entry, exit (wake) and return
427 * from acpi_idle_kernel.
428 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	/* Deep idle for S0 sleep may only be entered on the boot processor */
	assert(cpu_number() == master_cpu);

#if MONOTONIC
	mt_cpu_down(cpu_datap(0));
#endif /* MONOTONIC */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Dismiss any timer (or PMC) interrupt already latched in the local
	 * APIC: briefly re-enable interrupts so it can be taken, then cancel
	 * the deadline again with interrupts off.  Loop until none pending.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if MONOTONIC
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* MONOTONIC */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overriden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

#if MONOTONIC
	mt_cpu_up(cpu_datap(0));
#endif /* MONOTONIC */

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		kdebug_wake();
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
525
526 extern char real_mode_bootstrap_end[];
527 extern char real_mode_bootstrap_base[];
528
/*
 * Install the real-mode bootstrap trampoline.
 *
 * prot_entry: kernel-virtual address of the protected-mode entry point
 *             the trampoline will transfer to (e.g. slave_pstart or
 *             acpi_wake_prot); its physical address is stored at
 *             PROT_MODE_START within the trampoline page.
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches so the trampoline is visible to a CPU starting in real mode */
	__asm__("wbinvd");
}
554
555 boolean_t
ml_recent_wake(void)556 ml_recent_wake(void)
557 {
558 uint64_t ctime = mach_absolute_time();
559 assert(ctime > acpi_wake_postrebase_abstime);
560 return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
561 }
562
/*
 * Compute the modulo-256 additive checksum of a byte range.
 * ACPI structures are valid when their bytes sum to zero, so a return
 * value of 0 indicates a good checksum.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t total = 0;

	for (uint32_t idx = 0; idx < size; idx++) {
		total += ptr[idx];
	}

	return total;
}
575
/*
 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
 * physical.
 *
 * Expands to the body of a function returning ACPI_TABLE_HEADER *:
 * walks the (sdtp)->TableOffsetEntry[] array (whose entries are of
 * entry_type: UINT32 for an RSDT, UINT64 for an XSDT), and returns the
 * first table whose 4-byte signature matches.  NOTE: if the first
 * signature match has a bad checksum, NULL is returned immediately —
 * the remaining entries are not searched.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
\
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
\
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
\
	return NULL; \
}
610
/* Search the XSDT (64-bit table pointers) for a table with the given signature. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64);
}
616
/* Search the RSDT (32-bit table pointers) for a table with the given signature. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32);
}
622
623 /*
624 * Returns a pointer to an ACPI table header corresponding to the table
625 * whose signature is passed in, or NULL if no such table could be found.
626 */
627 static ACPI_TABLE_HEADER *
acpi_find_table(uintptr_t rsdp_physaddr,const char * signature)628 acpi_find_table(uintptr_t rsdp_physaddr, const char *signature)
629 {
630 static RSDP_DESCRIPTOR *rsdp = NULL;
631 static XSDT_DESCRIPTOR *xsdtp = NULL;
632 static RSDT_DESCRIPTOR *rsdtp = NULL;
633
634 if (signature == NULL) {
635 DBG("Invalid NULL signature passed to acpi_find_table\n");
636 return NULL;
637 }
638
639 /*
640 * RSDT or XSDT is required; without it, we cannot locate other tables.
641 */
642 if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) {
643 rsdp = PHYSMAP_PTOV(rsdp_physaddr);
644
645 /* Verify RSDP signature */
646 if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) {
647 DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
648 rsdp = NULL;
649 return NULL;
650 }
651
652 /* Verify RSDP checksum */
653 if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) {
654 DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
655 (unsigned long)rsdp_physaddr);
656 rsdp = NULL;
657 return NULL;
658 }
659
660 /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
661 if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) {
662 DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
663 rsdp = NULL;
664 return NULL;
665 } else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) {
666 /* XSDT (with 64-bit pointers to tables) */
667 rsdtp = NULL;
668 xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress);
669 if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) {
670 DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
671 (unsigned long)rsdp->XsdtPhysicalAddress);
672 xsdtp = NULL;
673 return NULL;
674 }
675 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) {
676 DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
677 rsdp = NULL;
678 return NULL;
679 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) {
680 /* RSDT (with 32-bit pointers to tables) */
681 xsdtp = NULL;
682 rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress);
683 if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) {
684 DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
685 (unsigned long)rsdp->RsdtPhysicalAddress);
686 rsdtp = NULL;
687 return NULL;
688 }
689 } else {
690 DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
691 rsdp->Revision);
692 rsdp = NULL;
693 return NULL;
694 }
695 }
696
697 assert(xsdtp != NULL || rsdtp != NULL);
698
699 if (__probable(xsdtp != NULL)) {
700 return acpi_find_table_via_xsdt(xsdtp, signature);
701 } else if (rsdtp != NULL) {
702 return acpi_find_table_via_rsdt(rsdtp, signature);
703 }
704
705 return NULL;
706 }
707
708 /*
709 * Returns the count of enabled logical processors present in the ACPI
710 * MADT, or 0 if the MADT could not be located.
711 */
712 uint32_t
acpi_count_enabled_logical_processors(void)713 acpi_count_enabled_logical_processors(void)
714 {
715 MULTIPLE_APIC_TABLE *madtp;
716 void *end_ptr;
717 APIC_HEADER *next_apic_entryp;
718 uint32_t enabled_cpu_count = 0;
719 uint64_t rsdp_physaddr;
720
721 rsdp_physaddr = efi_get_rsdp_physaddr();
722 if (__improbable(rsdp_physaddr == 0)) {
723 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
724 return 0;
725 }
726
727 madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT);
728
729 if (__improbable(madtp == NULL)) {
730 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
731 return 0;
732 }
733
734 end_ptr = (void *)((uintptr_t)madtp + madtp->Length);
735 next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE));
736
737 while ((void *)next_apic_entryp < end_ptr) {
738 switch (next_apic_entryp->Type) {
739 case APIC_PROCESSOR:
740 {
741 MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp;
742 if (madt_procp->ProcessorEnabled) {
743 enabled_cpu_count++;
744 }
745
746 break;
747 }
748
749 default:
750 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type,
751 next_apic_entryp->Length);
752 break;
753 }
754
755 next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length);
756 }
757
758 return enabled_cpu_count;
759 }
760