xref: /xnu-12377.81.4/osfmk/i386/acpi.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
33 #include <i386/mp.h>
34 #include <i386/cpu_data.h>
35 #if CONFIG_MTRR
36 #include <i386/mtrr.h>
37 #endif
38 #if HYPERVISOR
39 #include <kern/hv_support.h>
40 #endif
41 #if CONFIG_VMX
42 #include <i386/vmx/vmx_cpu.h>
43 #endif
44 #include <i386/ucode.h>
45 #include <i386/acpi.h>
46 #include <i386/fpu.h>
47 #include <i386/lapic.h>
48 #include <i386/mp.h>
49 #include <i386/mp_desc.h>
50 #include <i386/serial_io.h>
51 #if CONFIG_MCA
52 #include <i386/machine_check.h>
53 #endif
54 #include <i386/pmCPU.h>
55 
56 #include <i386/tsc.h>
57 
58 #define UINT64 uint64_t
59 #define UINT32 uint32_t
60 #define UINT16 uint16_t
61 #define UINT8 uint8_t
62 #define RSDP_VERSION_ACPI10     0
63 #define RSDP_VERSION_ACPI20     2
64 #include <acpi/Acpi.h>
65 #include <acpi/Acpi_v1.h>
66 #include <pexpert/i386/efi.h>
67 
68 #include <kern/cpu_data.h>
69 #include <kern/machine.h>
70 #include <kern/monotonic.h>
71 #include <kern/timer_queue.h>
72 #include <console/serial_protos.h>
73 #include <machine/pal_routines.h>
74 #include <vm/vm_page.h>
75 
76 #if HIBERNATION
77 #include <IOKit/IOHibernatePrivate.h>
78 #include <vm/vm_kern_xnu.h>
79 #endif
80 #include <IOKit/IOPlatformExpert.h>
81 #include <sys/kdebug.h>
82 
83 #if KPERF
84 #include <kperf/kptimer.h>
85 #endif /* KPERF */
86 
87 #if CONFIG_SLEEP
88 extern void     acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
89 extern void     acpi_wake_prot(void);
90 #endif
91 extern kern_return_t IOCPURunPlatformQuiesceActions(void);
92 extern kern_return_t IOCPURunPlatformActiveActions(void);
93 extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);
94 
95 extern void     fpinit(void);
96 
97 #if DEVELOPMENT || DEBUG
98 #define DBG(x...) kprintf(x)
99 #else
100 #define DBG(x...)
101 #endif
102 
/*
 * Install the protected-mode wake entry point into the real-mode
 * bootstrap vector so firmware can resume the kernel after S3 sleep.
 *
 * Returns the physical offset of the bootstrap vector, or 0 when
 * sleep support is compiled out.
 */
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	/* Copy the real-mode trampoline and point it at acpi_wake_prot. */
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}
113 
114 #if CONFIG_SLEEP
115 
/* kdebug_enable value stashed across sleep so tracing can be restored on wake */
unsigned int            save_kdebug_enable = 0;
/* mach_absolute_time() sampled immediately before entering S3 sleep */
static uint64_t         acpi_sleep_abstime;
/* mach_absolute_time() sampled after returning from deep idle (S0 sleep) */
static uint64_t         acpi_idle_abstime;
/* timestamps at wake: raw, and after the TSC->abstime rebase respectively */
static uint64_t         acpi_wake_abstime, acpi_wake_postrebase_abstime;
/* when TRUE (default), nanotime is rebased so time appears frozen over deep idle;
 * can be overridden via sysctl tsc.deep_idle_rebase */
boolean_t               deep_idle_rebase = TRUE;
121 
122 #if HIBERNATION
/*
 * Bundles the ACPI platform sleep callback and its argument so both can
 * be passed through acpi_sleep_cpu() to acpi_hibernate() as one refcon.
 */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;   /* platform callback that performs the actual sleep */
	void *refcon;               /* opaque argument forwarded to func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
128 
/*
 * Sleep callback used when hibernation is configured.  Runs on the boot
 * CPU, with interrupts disabled, after all slave CPUs are offline.  If a
 * hibernate image is pending it is written to disk first; the image
 * writer's disposition then selects halt, restart, or fall-through to
 * normal sleep.  Finally the platform is quiesced and control passes to
 * the real ACPI sleep entry (data->func); that call does not return —
 * wake re-enters the kernel through the real-mode bootstrap vector.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		/* Write the hibernate image; returns the post-write action. */
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	/* Save VMX state before CPU context is lost to the sleep. */
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	/* Timestamp used on wake to rebase the TSC->abstime conversion. */
	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
176 #endif /* HIBERNATION */
177 #endif /* CONFIG_SLEEP */
178 
179 extern void                     slave_pstart(void);
180 
/*
 * Put the system to sleep (ACPI S3, or S4 via acpi_hibernate).  Called
 * on the boot CPU; all slave CPUs are expected to be halted and are
 * transitioned fully OFF here.  CPU and platform state are saved, then
 * control passes to the platform sleep entry point, which does not
 * return until wake.  Execution resumes mid-function after wake, where
 * timekeeping, APIC, syscall MSRs, MCA, MTRR/PAT, microcode, VMX and
 * power-management state are all restored.
 *
 * func/refcon: ACPI platform callback (and its argument) that performs
 * the actual entry into the sleep state.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int    cpu;
	kern_return_t   rc;
	unsigned int    my_cpu;
	uint64_t        start;
	uint64_t        elapsed = 0;
	uint64_t        elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown(true);

#if HIBERNATION
	/* Wrap the platform callback so acpi_hibernate can forward to it. */
	data.func = func;
	data.refcon = refcon;
#endif

	/* Quiesce per-CPU performance counters and kperf timers. */
#if CONFIG_CPU_COUNTERS
	mt_cpu_down(cdp);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Disable tracing across the sleep; restored after wake below. */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* ---- Wake path: execution resumes here after S3/S4 wake ---- */

	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (false == disable_serial_output) {
		pal_serial_init();
	}

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode and apply CPU workarounds */
	ucode_update_wake_and_apply_cpu_was();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure(true);
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* If tracing was off before sleep, honor the trace_wake boot-arg. */
	if (kdebug_enable == 0) {
		elapsed_trace_start += kdebug_wake();
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();


#if HYPERVISOR
	/* Notify hypervisor that we are about to resume */
	hv_resume();
#endif

	IOCPURunPlatformActiveActions();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if CONFIG_CPU_COUNTERS
	mt_cpu_up(cdp);
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_curcpu_up();
#endif /* KPERF */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}
393 
/*
 * Called early in hibernate resume, before the VM is fully active:
 * rebuild VM structures from the restored hibernate image.
 * No-op when hibernation is compiled out.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
401 
/*
 * Called later in hibernate resume: finish machine-level hibernate
 * initialization and clear the per-CPU hibernate flag so subsequent
 * sleeps are treated as ordinary S3.  No-op when hibernation is
 * compiled out or no hibernate image was in play.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
414 
415 /*
416  * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
417  * to idle the boot processor in the deepest C-state for S0 sleep. All slave
418  * processors are expected already to have been offlined in the deepest C-state.
419  *
420  * The contract with ACPI is that although the kernel is called with interrupts
421  * disabled, interrupts may need to be re-enabled to dismiss any pending timer
422  * interrupt. However, the callback function will be called once this has
423  * occurred and interrupts are guaranteed to be disabled at that time,
424  * and to remain disabled during C-state entry, exit (wake) and return
425  * from acpi_idle_kernel.
426  */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t       istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	/* Must run on the boot processor; slaves are already offline. */
	assert(cpu_number() == master_cpu);

#if CONFIG_CPU_COUNTERS
	mt_cpu_down(cpu_datap(0));
#endif /* CONFIG_CPU_COUNTERS */
#if KPERF
	kptimer_stop_curcpu();
#endif /* KPERF */

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Per the contract above: briefly re-enable interrupts to dismiss
	 * any pending timer (or PMI) interrupt, then disable them again
	 * before proceeding.  Loop until none remain pending.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if CONFIG_CPU_COUNTERS
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* CONFIG_CPU_COUNTERS */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	/*
	 * NOTE(review): unlike acpi_sleep_kernel, this use of
	 * hibernate_write_image() is not guarded by #if HIBERNATION —
	 * presumably all configurations building this file define it;
	 * confirm before building with HIBERNATION disabled.
	 */
	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overriden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

#if CONFIG_CPU_COUNTERS
	mt_cpu_up(cpu_datap(0));
#endif /* CONFIG_CPU_COUNTERS */

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		kdebug_wake();
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
523 
524 extern char real_mode_bootstrap_end[];
525 extern char real_mode_bootstrap_base[];
526 
/*
 * Install the real-mode bootstrap trampoline used both for MP slave
 * startup and for wake from sleep.
 *
 * prot_entry: kernel-virtual address of the protected-mode entry point
 *             the trampoline should jump to once it has left real mode.
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}
552 
553 boolean_t
ml_recent_wake(void)554 ml_recent_wake(void)
555 {
556 	uint64_t ctime = mach_absolute_time();
557 	assert(ctime > acpi_wake_postrebase_abstime);
558 	return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
559 }
560 
/*
 * Sum `size` bytes starting at `ptr`, modulo 256.  ACPI tables include
 * a checksum byte chosen so that a valid table sums to zero.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	const uint8_t *cursor = ptr;
	const uint8_t *limit = ptr + size;
	uint8_t total = 0;

	while (cursor < limit) {
		total += *cursor++;
	}

	return total;
}
573 
/*
 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
 * physical.
 *
 * Expands to the full body of a function returning ACPI_TABLE_HEADER *
 * (note the embedded `return` statements).  entry_type is UINT32 for an
 * RSDT and UINT64 for an XSDT, matching the pointer width of the
 * respective TableOffsetEntry array.  The first table whose 4-byte
 * signature matches is checksummed; it is returned when valid, and the
 * search aborts with NULL (rather than continuing) when the checksum
 * fails.  NULL is also returned when no signature matches.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{                                                                                               \
	uint32_t i, pointer_count;                                                              \
                                                                                                \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */          \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type);      \
                                                                                                \
	for (i = 0; i < pointer_count; i++) {                                                   \
	        ACPI_TABLE_HEADER *next_table =                                                 \
	                (ACPI_TABLE_HEADER *)PHYSMAP_PTOV(                                      \
	                        (uintptr_t)(sdtp)->TableOffsetEntry[i]);                        \
	        if (strncmp(&next_table->Signature[0], (signature), 4) == 0) {                  \
	/* \
	 * Checksum the table first, then return it if the checksum \
	 * is valid. \
	 */                                                                                     \
	                if (cksum8((uint8_t *)next_table, next_table->Length) == 0) {           \
	                        return next_table;                                              \
	                } else {                                                                \
	                        DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature),    \
	                            (unsigned long)(sdtp)->TableOffsetEntry[i]);                \
	                        return NULL;                                                    \
	                }                                                                       \
	        }                                                                               \
	}                                                                                       \
                                                                                                \
	return NULL;                                                                            \
}
608 
/*
 * Search the XSDT (64-bit table pointers) for a table with the given
 * 4-character signature.  Returns the table if found and its checksum
 * validates, otherwise NULL (see SEARCH_FOR_ACPI_TABLE).
 */
static ACPI_TABLE_HEADER *
acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64);
}
614 
/*
 * Search the RSDT (32-bit table pointers) for a table with the given
 * 4-character signature.  Returns the table if found and its checksum
 * validates, otherwise NULL (see SEARCH_FOR_ACPI_TABLE).
 */
static ACPI_TABLE_HEADER *
acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32);
}
620 
621 /*
622  * Returns a pointer to an ACPI table header corresponding to the table
623  * whose signature is passed in, or NULL if no such table could be found.
624  */
625 static ACPI_TABLE_HEADER *
acpi_find_table(uintptr_t rsdp_physaddr,const char * signature)626 acpi_find_table(uintptr_t rsdp_physaddr, const char *signature)
627 {
628 	static RSDP_DESCRIPTOR *rsdp = NULL;
629 	static XSDT_DESCRIPTOR *xsdtp = NULL;
630 	static RSDT_DESCRIPTOR *rsdtp = NULL;
631 
632 	if (signature == NULL) {
633 		DBG("Invalid NULL signature passed to acpi_find_table\n");
634 		return NULL;
635 	}
636 
637 	/*
638 	 * RSDT or XSDT is required; without it, we cannot locate other tables.
639 	 */
640 	if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) {
641 		rsdp = PHYSMAP_PTOV(rsdp_physaddr);
642 
643 		/* Verify RSDP signature */
644 		if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) {
645 			DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
646 			rsdp = NULL;
647 			return NULL;
648 		}
649 
650 		/* Verify RSDP checksum */
651 		if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) {
652 			DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
653 			    (unsigned long)rsdp_physaddr);
654 			rsdp = NULL;
655 			return NULL;
656 		}
657 
658 		/* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
659 		if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) {
660 			DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
661 			rsdp = NULL;
662 			return NULL;
663 		} else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) {
664 			/* XSDT (with 64-bit pointers to tables) */
665 			rsdtp = NULL;
666 			xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress);
667 			if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) {
668 				DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
669 				    (unsigned long)rsdp->XsdtPhysicalAddress);
670 				xsdtp = NULL;
671 				return NULL;
672 			}
673 		} else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) {
674 			DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
675 			rsdp = NULL;
676 			return NULL;
677 		} else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) {
678 			/* RSDT (with 32-bit pointers to tables) */
679 			xsdtp = NULL;
680 			rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress);
681 			if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) {
682 				DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
683 				    (unsigned long)rsdp->RsdtPhysicalAddress);
684 				rsdtp = NULL;
685 				return NULL;
686 			}
687 		} else {
688 			DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
689 			    rsdp->Revision);
690 			rsdp = NULL;
691 			return NULL;
692 		}
693 	}
694 
695 	assert(xsdtp != NULL || rsdtp != NULL);
696 
697 	if (__probable(xsdtp != NULL)) {
698 		return acpi_find_table_via_xsdt(xsdtp, signature);
699 	} else if (rsdtp != NULL) {
700 		return acpi_find_table_via_rsdt(rsdtp, signature);
701 	}
702 
703 	return NULL;
704 }
705 
706 /*
707  * Returns the count of enabled logical processors present in the ACPI
708  * MADT, or 0 if the MADT could not be located.
709  */
710 uint32_t
acpi_count_enabled_logical_processors(void)711 acpi_count_enabled_logical_processors(void)
712 {
713 	MULTIPLE_APIC_TABLE *madtp;
714 	void *end_ptr;
715 	APIC_HEADER *next_apic_entryp;
716 	uint32_t enabled_cpu_count = 0;
717 	uint64_t rsdp_physaddr;
718 
719 	rsdp_physaddr = efi_get_rsdp_physaddr();
720 	if (__improbable(rsdp_physaddr == 0)) {
721 		DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
722 		return 0;
723 	}
724 
725 	madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT);
726 
727 	if (__improbable(madtp == NULL)) {
728 		DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
729 		return 0;
730 	}
731 
732 	end_ptr = (void *)((uintptr_t)madtp + madtp->Length);
733 	next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE));
734 
735 	while ((void *)next_apic_entryp < end_ptr) {
736 		switch (next_apic_entryp->Type) {
737 		case APIC_PROCESSOR:
738 		{
739 			MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp;
740 			if (madt_procp->ProcessorEnabled) {
741 				enabled_cpu_count++;
742 			}
743 
744 			break;
745 		}
746 
747 		default:
748 			DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type,
749 			    next_apic_entryp->Length);
750 			break;
751 		}
752 
753 		next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length);
754 	}
755 
756 	return enabled_cpu_count;
757 }
758