xref: /xnu-8019.80.24/osfmk/i386/lapic_native.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea) !
1 /*
2  * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41 
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
44 
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #include <i386/bit_routines.h>
56 #if CONFIG_MCA
57 #include <i386/machine_check.h>
58 #endif
59 
60 #include <sys/kdebug.h>
61 
62 #if     MP_DEBUG
63 #define PAUSE           delay(1000000)
64 #define DBG(x...)       kprintf(x)
65 #else
66 #define DBG(x...)
67 #define PAUSE
68 #endif  /* MP_DEBUG */
69 
lapic_ops_table_t       *lapic_ops;     /* Lapic operations switch */

static vm_map_offset_t  lapic_pbase;    /* Physical base memory-mapped regs */
static vm_offset_t      lapic_vbase;    /* Virtual base memory-mapped regs */

/* Registered handlers for local APIC interrupt sources, indexed by vector. */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Error-interrupt storm throttling state (see LAPIC_ERROR_INTERRUPT handling). */
static boolean_t lapic_errors_masked = FALSE;       /* error LVT masked after a storm */
static uint64_t lapic_last_master_error = 0;        /* abs time of last BSP error */
static uint64_t lapic_error_time_threshold = 0;     /* abs-time window for storm detection */
static unsigned lapic_master_error_count = 0;       /* errors seen within the window */
static unsigned lapic_error_count_threshold = 5;    /* errors tolerated before masking */
static boolean_t lapic_dont_panic = FALSE;          /* boot-arg: suppress error panics */
int lapic_max_interrupt_cpunum = 0;  /* highest cpu number taking ExtINT interrupts */
long lapic_icr_pending_timeouts = 0;

/* APIC operating mode, recorded at sleep time to cross-check state at wake. */
typedef enum {
	APIC_MODE_UNKNOWN = 0,
	APIC_MODE_XAPIC = 1,
	APIC_MODE_X2APIC = 2
} apic_mode_t;

static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN;
96 
#ifdef MP_DEBUG
/*
 * Debug-only helper: dump the cpu_number <-> local APIC id mappings
 * recorded in cpu_to_lapic[] and lapic_to_cpu[], skipping unused (-1) slots.
 */
void
lapic_cpu_map_dump(void)
{
	int     i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1) {
			continue;
		}
		kprintf("cpu_to_lapic[%d]: %d\n",
		    i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1) {
			continue;
		}
		kprintf("lapic_to_cpu[%d]: %d\n",
		    i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */
119 
/*
 * Map the local APIC's MMIO register page (at lapic_pbase) into the kernel
 * map at a newly-found virtual address, recorded in lapic_vbase.  Idempotent:
 * does nothing if lapic_vbase is already set.
 */
static void
map_local_apic(void)
{
	vm_map_offset_t lapic_vbase64;
	int             result;
	kern_return_t   kr;
	vm_map_entry_t  entry;

	if (lapic_vbase == 0) {
		lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
		result = vm_map_find_space(kernel_map,
		    &lapic_vbase64,
		    round_page(LAPIC_SIZE), 0,
		    0,
		    VM_MAP_KERNEL_FLAGS_NONE,
		    VM_KERN_MEMORY_IOKIT,
		    &entry);
		/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
		 */
		lapic_vbase = (vm_offset_t) lapic_vbase64;
		if (result != KERN_SUCCESS) {
			panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
		}
		/* vm_map_find_space() returns with the map locked. */
		vm_map_unlock(kernel_map);

		/*
		 * Map in the local APIC non-cacheable, as recommended by Intel
		 * in section 8.4.1 of the "System Programming Guide".
		 * In fact, this is redundant because EFI will have assigned an
		 * MTRR physical range containing the local APIC's MMIO space as
		 * UC and this will override the default PAT setting.
		 */
		kr = pmap_enter(pmap_kernel(),
		    lapic_vbase,
		    (ppnum_t) i386_btop(lapic_pbase),
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    VM_WIMG_IO,
		    TRUE);

		assert(kr == KERN_SUCCESS);
	}
}
163 
/*
 * Put the local APIC into legacy (xAPIC, memory-mapped) mode and program
 * the flat logical destination model.  If the CPU is currently in x2APIC
 * mode, the APIC must first be fully disabled before re-enabling it in
 * xAPIC mode (a direct x2APIC->xAPIC transition is not architecturally
 * permitted), hence the two-step MSR write sequence below.
 */
static void
legacy_init(void)
{
	uint32_t        lo, hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) {
		/*
		 * If we're already in x2APIC mode, we MUST disable the local APIC
		 * before transitioning back to legacy APIC mode.
		 */
		lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE);
	}
	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
186 
187 
/* xAPIC register read: 32-bit load from the memory-mapped register page. */
static uint32_t
legacy_read(lapic_register_t reg)
{
	return *LAPIC_MMIO(reg);
}
193 
/* xAPIC register write: 32-bit store to the memory-mapped register page. */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
199 
/*
 * Read the 64-bit Interrupt Command Register: in xAPIC mode it is split
 * across two 32-bit MMIO registers (ICRD holds the high/destination half).
 */
static uint64_t
legacy_read_icr(void)
{
	return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
}
205 
/*
 * Write the ICR to send an IPI.  The destination half (ICRD) must be
 * written first: the write to the low half (ICR) is what triggers the
 * send, so this ordering must not be changed.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
212 
/* Operations vector for legacy (xAPIC, memory-mapped) mode. */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
220 
/* TRUE if the OS selected x2APIC mode (set during lapic_init()). */
boolean_t is_x2apic = FALSE;
222 
/*
 * Switch the local APIC into x2APIC (MSR-based) mode if it is not already,
 * and prime the per-cpu software copy of the LVT timer register (kept to
 * avoid APIC-access VM exits on reads; see x2apic_read/x2apic_write).
 */
static void
x2apic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
		lo |= MSR_IA32_APIC_BASE_EXTENDED;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
		kprintf("x2APIC mode enabled\n");
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
238 
/*
 * x2APIC register read via MSR.  LVT_TIMER reads are served from the
 * per-cpu software copy to avoid frequent APIC-access VM exits when
 * running virtualized.
 */
static uint32_t
x2apic_read(lapic_register_t reg)
{
	uint32_t        lo;
	uint32_t        hi;

	if (LVT_TIMER == reg) {
		// avoid frequent APIC access VM-exit
		return current_cpu_datap()->cpu_soft_apic_lvt_timer;
	}
	rdmsr(LAPIC_MSR(reg), lo, hi);
	return lo;
}
252 
/*
 * x2APIC register write via MSR.  LVT_TIMER writes also update the
 * per-cpu software copy that x2apic_read() serves from.
 */
static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	if (LVT_TIMER == reg) {
		current_cpu_datap()->cpu_soft_apic_lvt_timer = value;
	}
	wrmsr(LAPIC_MSR(reg), value, 0);
}
261 
/* In x2APIC mode the ICR is a single 64-bit MSR. */
static uint64_t
x2apic_read_icr(void)
{
	return rdmsr64(LAPIC_MSR(ICR));
}
267 
/*
 * Send an IPI: a single 64-bit ICR MSR write with the command in the
 * low half and the destination APIC id in the high half.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
273 
/* Operations vector for x2APIC (MSR-based) mode. */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
281 
/*
 * Used by APs to determine their APIC IDs; assumes master CPU has initialized
 * the local APIC interfaces.
 *
 * Reads MSR_IA32_APIC_BASE to discover which mode the *local* APIC is in,
 * then reads the ID register via the matching interface (MSR in x2APIC
 * mode, MMIO otherwise).  Panics if the APIC is disabled.
 */
uint32_t
lapic_safe_apicid(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_lapic_enabled, is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	if (is_lapic_enabled && is_local_x2apic) {
		return x2apic_read(ID);
	} else if (is_lapic_enabled) {
		return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
	} else {
		panic("Unknown Local APIC state!");
		/*NORETURN*/
	}
}
306 
/*
 * Bring the executing CPU's local APIC into the mode the OS selected
 * (is_x2apic), transitioning via LAPIC_INIT() if firmware left it in the
 * other mode.  With for_wake == true, additionally verify that the mode
 * matches what was recorded before sleep, flagging (and on DEBUG/
 * DEVELOPMENT kernels, halting on) any inconsistency.
 */
static void
lapic_reinit(bool for_wake)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;
	boolean_t       is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	/*
	 * If we're configured for x2apic mode and we're being asked to transition
	 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
	 * asked to transition to x2apic mode, call LAPIC_INIT().
	 */
	if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) {
		LAPIC_INIT();
		/* Now re-read after LAPIC_INIT() */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
		is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	}

	if ((!is_lapic_enabled && !is_local_x2apic)) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If we did not select the same APIC mode as we had before sleep, flag
	 * that as an error (and panic on debug/development kernels).  Note that
	 * we might get here with for_wake == true for the first boot case.  In
	 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
	 * slept yet), so we do not need to do any APIC checks.
	 */
	if (for_wake &&
	    ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) ||
	    (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) {
		kprintf("Inconsistent APIC state after wake (was %d before sleep, "
		    "now is %d)", apic_mode_before_sleep,
		    is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC);
#if DEBUG || DEVELOPMENT
		kprintf("HALTING.\n");
		/*
		 * Unfortunately, we cannot safely panic here because the
		 * executing CPU might not be fully initialized.  The best
		 * we can do is just print a message to the console and
		 * halt.
		 */
		asm volatile ("cli; hlt;" ::: "memory");
#endif
	}

	if (is_local_x2apic) {
		/* ensure the soft copy is up-to-date */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
369 
/*
 * Per-AP local APIC initialization: reinit into the OS-selected mode.
 * On DEBUG/DEVELOPMENT kernels, verify this is not running on the BSP.
 */
void
lapic_init_slave(void)
{
	lapic_reinit(false);
#if DEBUG || DEVELOPMENT
	if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) {
		panic("Calling lapic_init_slave() on the boot processor");
	}
#endif
}
380 
/*
 * Boot-processor local APIC initialization:
 *  - examine MSR_IA32_APIC_BASE (must be the enabled BSP);
 *  - decide between x2APIC and legacy mode (CPU capability, boot-arg,
 *    and VMM autodetection) and select the matching ops table;
 *  - map the MMIO register page (APs may need it for their APIC ids);
 *  - initialize the APIC, sanity-check its version, and record the BSP
 *    in the lapic_id <-> cpu_number map.
 */
void
lapic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic         = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overridden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/*
		 * If no x2apic boot-arg was set and if we're running under a VMM,
		 * autoenable x2APIC mode.
		 */
		if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE &&
		    cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) {
			is_x2apic = TRUE;
		}
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	if (lapic_pbase != 0) {
		/*
		 * APs might need to consult the local APIC via the MMIO interface
		 * to get their APIC IDs.
		 */
		map_local_apic();
	} else if (!is_x2apic) {
		panic("Local APIC physical address was not set.");
	}

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map(lapic_safe_apicid(), 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
446 
447 
/*
 * Read the Error Status Register.  The ESR latches: a write (of any
 * value) is required to update it before the read returns current state.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
455 
/* Clear the ESR: two back-to-back writes (update latch, then clear). */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
462 
/* LVT delivery-mode field names, indexed by the 3-bit DM value. */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* LVT timer-mode field names, indexed by the 2-bit TMR value. */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
480 
/*
 * Dump the executing CPU's local APIC state to the kernel log: ID/version,
 * priority registers, destination/logical registers, SVR, every LVT entry
 * (decoded via the helper macros below), timer counts, ESR, and the
 * TMR/IRR/ISR bit arrays.  Diagnostic only; reads, never writes (except
 * the ESR write-read protocol in lapic_esr_read()).
 */
void
lapic_dump(void)
{
	int     i;

/* Field decoders for LVT entries; BOOL prints '!' when the flag is clear. */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    lapic_safe_apicid(),
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; report 0 there. */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI:    Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    &  LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	/* Column ruler for the 256-bit TMR/IRR/ISR dumps below. */
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
583 
584 boolean_t
lapic_probe(void)585 lapic_probe(void)
586 {
587 	uint32_t        lo;
588 	uint32_t        hi;
589 
590 	if (cpuid_features() & CPUID_FEATURE_APIC) {
591 		return TRUE;
592 	}
593 
594 	if (cpuid_family() == 6 || cpuid_family() == 15) {
595 		/*
596 		 * Mobile Pentiums:
597 		 * There may be a local APIC which wasn't enabled by BIOS.
598 		 * So we try to enable it explicitly.
599 		 */
600 		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
601 		lo &= ~MSR_IA32_APIC_BASE_BASE;
602 		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
603 		lo |= MSR_IA32_APIC_BASE_ENABLE;
604 		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
605 
606 		/*
607 		 * Re-initialize cpu features info and re-check.
608 		 */
609 		cpuid_set_info();
610 		/* We expect this codepath will never be traversed
611 		 * due to EFI enabling the APIC. Reducing the APIC
612 		 * interrupt base dynamically is not supported.
613 		 */
614 		if (cpuid_features() & CPUID_FEATURE_APIC) {
615 			printf("Local APIC discovered and enabled\n");
616 			lapic_os_enabled = TRUE;
617 			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
618 			return TRUE;
619 		}
620 	}
621 
622 	return FALSE;
623 }
624 
/*
 * Shut down the local APIC on the executing CPU (only if the OS, not the
 * BIOS, enabled it): mask the LVT sources, software-disable via SVR, and
 * clear the enable bit in MSR_IA32_APIC_BASE.  When going to sleep,
 * record the current mode so lapic_reinit() can validate it at wake.
 */
void
lapic_shutdown(bool for_sleep)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	if (for_sleep == true) {
		apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC);
	}

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked (only interrupt-eligible CPUs had it unmasked) */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
670 
671 boolean_t
cpu_can_exit(int cpu)672 cpu_can_exit(int cpu)
673 {
674 	return cpu > lapic_max_interrupt_cpunum;
675 }
676 
/*
 * Configure the executing CPU's local APIC for normal operation:
 * one-time (cpu 0) setup of the error-throttling window, the
 * lapic_dont_panic boot-arg, and intcpumax; then reinit into the selected
 * mode, open TPR, enable via SVR, and program each LVT entry.  Called at
 * boot and again at wake (for_wake == true enables mode cross-checking
 * in lapic_reinit()).
 */
void
lapic_configure(bool for_wake)
{
	int     value;

	/* One-time: 250ms error-storm window + lapic_dont_panic boot-arg. */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Default intcpumax: CPUs 0-1 with HyperThreading, else CPU 0 only. */
	if (cpu_number() == 0) {
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/*
	 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
	 * but firmware configured the Legacy APIC):
	 */
	lapic_reinit(for_wake);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT: unmasked only on interrupt-eligible CPUs */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/* Error LVT: skip re-enabling on the master if a storm masked it. */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
736 
/*
 * Program the APIC timer: mask state, one-shot/periodic mode, divide
 * configuration, and initial count (writing the initial count starts the
 * countdown).  Preemption is disabled so all writes hit the same CPU's APIC.
 */
void
lapic_set_timer(
	boolean_t               interrupt_unmasked,
	lapic_timer_mode_t      mode,
	lapic_timer_divide_t    divisor,
	lapic_timer_count_t     initial_count)
{
	uint32_t        timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
	mp_enable_preemption();
}
756 
/*
 * Configure the APIC timer's mask state, mode, and divide configuration
 * without starting it (no initial-count write).  Also clears any
 * TSC-deadline mode selection.
 */
void
lapic_config_timer(
	boolean_t               interrupt_unmasked,
	lapic_timer_mode_t      mode,
	lapic_timer_divide_t    divisor)
{
	uint32_t        timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC |
	    LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}
776 
/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t        timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/*
	 * Serialize writes per Intel OSWG: write a far-future deadline and
	 * re-read until the MSR reflects it (proving the LVT mode change has
	 * taken effect), then disarm by writing 0.
	 */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
802 
/*
 * Fast path to (re)arm the APIC timer: unmask the timer LVT and write a
 * new initial count, leaving mode and divide configuration untouched.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t     initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
810 
/* Arm (or, with deadline == 0, disarm) the TSC-deadline timer MSR. */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
817 
/* Return the currently-armed TSC deadline (0 if disarmed/fired). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
823 
/*
 * Read back the APIC timer configuration.  Any of the out-parameters may
 * be NULL to skip that field.  Preemption is disabled so all reads come
 * from the same CPU's APIC.
 */
void
lapic_get_timer(
	lapic_timer_mode_t      *mode,
	lapic_timer_divide_t    *divisor,
	lapic_timer_count_t     *initial_count,
	lapic_timer_count_t     *current_count)
{
	mp_disable_preemption();
	if (mode) {
		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
		    periodic : one_shot;
	}
	if (divisor) {
		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	}
	if (initial_count) {
		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
	}
	if (current_count) {
		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
	}
	mp_enable_preemption();
}
847 
/* Signal end-of-interrupt by writing the EOI register (value is ignored). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
853 
854 void
lapic_end_of_interrupt(void)855 lapic_end_of_interrupt(void)
856 {
857 	_lapic_end_of_interrupt();
858 }
859 
/* Rewrite the perfmon LVT with its vector and no mask bit (unmasked). */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
865 
866 void
lapic_set_perfcnt_interrupt_mask(boolean_t mask)867 lapic_set_perfcnt_interrupt_mask(boolean_t mask)
868 {
869 	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
870 	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
871 }
872 
/*
 * Register a handler for one of the local APIC interrupt sources.
 * Accepts either an absolute vector (above lapic_interrupt_base, which is
 * normalized to the relative number) or a relative LAPIC_*_INTERRUPT
 * number.  Panics on any vector that is not a known LAPIC source.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector",
		    vector, func);
	}
}
895 
/* Convenience wrapper: register the perfmon (PMI) interrupt handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
901 
/*
 * Dispatch a local APIC interrupt to its registered handler.
 *
 * interrupt_num is an absolute vector; it is rebased by
 * lapic_interrupt_base.  A negative result is only serviced if it is the
 * NMI slot with a registered handler.  Returns 1 if the interrupt was
 * handled here (EOI issued where required), 0 to let the platform expert
 * handle it.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int     retval = 0;
	int     esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the LAPIC range: only the NMI pseudo-vector is ours. */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				/* Within the storm window: count, and mask once over threshold. */
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				/* Outside the window: restart the count. */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
		/* Intentionally empty: retval stays 0 (unhandled). */
	}
	break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
1012 
1013 void
lapic_smm_restore(void)1014 lapic_smm_restore(void)
1015 {
1016 	boolean_t state;
1017 
1018 	if (lapic_os_enabled == FALSE) {
1019 		return;
1020 	}
1021 
1022 	state = ml_set_interrupts_enabled(FALSE);
1023 
1024 	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
1025 		/*
1026 		 * Bogus SMI handler enables interrupts but does not know about
1027 		 * local APIC interrupt sources. When APIC timer counts down to
1028 		 * zero while in SMM, local APIC will end up waiting for an EOI
1029 		 * but no interrupt was delivered to the OS.
1030 		 */
1031 		_lapic_end_of_interrupt();
1032 
1033 		/*
1034 		 * timer is one-shot, trigger another quick countdown to trigger
1035 		 * another timer interrupt.
1036 		 */
1037 		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
1038 			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
1039 		}
1040 
1041 		kprintf("lapic_smm_restore\n");
1042 	}
1043 
1044 	ml_set_interrupts_enabled(state);
1045 }
1046 
1047 void
lapic_send_ipi(int cpu,int vector)1048 lapic_send_ipi(int cpu, int vector)
1049 {
1050 	boolean_t       state;
1051 
1052 	if (vector < lapic_interrupt_base) {
1053 		vector += lapic_interrupt_base;
1054 	}
1055 
1056 	state = ml_set_interrupts_enabled(FALSE);
1057 
1058 	/* X2APIC's ICR doesn't have a pending bit. */
1059 	if (!is_x2apic) {
1060 		/* Wait for pending outgoing send to complete */
1061 		while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
1062 			cpu_pause();
1063 		}
1064 	}
1065 
1066 	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
1067 
1068 	(void) ml_set_interrupts_enabled(state);
1069 }
1070 
/*
 * Send an NMI IPI to the target 'cpu'.
 *
 * Unlike lapic_send_ipi(), this does not disable interrupts around the
 * ICR access and only waits a bounded time for a prior send to drain --
 * presumably because NMI delivery is used on debugger/panic paths where
 * spinning forever is unacceptable (NOTE(review): confirm against callers).
 */
void
lapic_send_nmi(int cpu)
{
	/* xAPIC only: X2APIC's ICR has no delivery-status (pending) bit. */
	if (!is_x2apic) {
		if (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
			uint64_t now = mach_absolute_time();
			/* Wait up to 10ms for the pending outgoing send (if any) to complete */
			/*
			 * NOTE(review): compares an absolute-time delta against a
			 * nanosecond constant; correct only where one abstime unit
			 * equals 1ns (true on x86) -- confirm if ever ported.
			 */
			while ((LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) &&
			    (mach_absolute_time() - now) < (10 * NSEC_PER_MSEC)) {
				cpu_pause();
			}
		}
#if DEVELOPMENT || DEBUG
		/* Timed out with the send still pending: log and count it. */
		if (__improbable(LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING)) {
			/* Since it's not safe to invoke printf here, kprintf and counting is the best we can do */
			kprintf("WARNING: Wait for lapic ICR pending bit timed-out!\n");
			atomic_incl((volatile long *)&lapic_icr_pending_timeouts, 1);
		}
#endif
	}

	/* Program the interrupt command register */
	/* The vector is ignored in this case--the target CPU will enter on the
	 * NMI vector.
	 */
	LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
	    LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI);
}
1099 
1100 /*
1101  * The following interfaces are privately exported to AICPM.
1102  */
1103 
1104 boolean_t
lapic_is_interrupt_pending(void)1105 lapic_is_interrupt_pending(void)
1106 {
1107 	int             i;
1108 
1109 	for (i = 0; i < 8; i += 1) {
1110 		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
1111 		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
1112 			return TRUE;
1113 		}
1114 	}
1115 
1116 	return FALSE;
1117 }
1118 
1119 boolean_t
lapic_is_interrupting(uint8_t vector)1120 lapic_is_interrupting(uint8_t vector)
1121 {
1122 	int             i;
1123 	int             bit;
1124 	uint32_t        irr;
1125 	uint32_t        isr;
1126 
1127 	i = vector / 32;
1128 	bit = 1 << (vector % 32);
1129 
1130 	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1131 	isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1132 
1133 	if ((irr | isr) & bit) {
1134 		return TRUE;
1135 	}
1136 
1137 	return FALSE;
1138 }
1139 
1140 void
lapic_interrupt_counts(uint64_t intrs[256])1141 lapic_interrupt_counts(uint64_t intrs[256])
1142 {
1143 	int             i;
1144 	int             j;
1145 	int             bit;
1146 	uint32_t        irr;
1147 	uint32_t        isr;
1148 
1149 	if (intrs == NULL) {
1150 		return;
1151 	}
1152 
1153 	for (i = 0; i < 8; i += 1) {
1154 		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1155 		isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1156 
1157 		if ((isr | irr) == 0) {
1158 			continue;
1159 		}
1160 
1161 		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
1162 			bit = (32 * i) + j;
1163 			if ((isr | irr) & (1 << j)) {
1164 				intrs[bit] += 1;
1165 			}
1166 		}
1167 	}
1168 }
1169 
/*
 * Disable the local APIC timer.
 *
 * In TSC-deadline mode, clearing the deadline MSR disarms the timer;
 * otherwise the LVT timer entry is masked and the countdown cleared.
 */
void
lapic_disable_timer(void)
{
	uint32_t        lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		/* Writing 0 to IA32_TSC_DEADLINE disarms the deadline timer. */
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/*
		 * NOTE(review): result unused; presumably a read-back to
		 * ensure the masking write has reached the APIC before
		 * returning -- confirm before removing.
		 */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
1189 
1190 /* SPI returning the CMCI vector */
1191 uint8_t
lapic_get_cmci_vector(void)1192 lapic_get_cmci_vector(void)
1193 {
1194 	uint8_t cmci_vector = 0;
1195 #if CONFIG_MCA
1196 	/* CMCI, if available */
1197 	if (mca_is_cmci_present()) {
1198 		cmci_vector = LAPIC_VECTOR(CMCI);
1199 	}
1200 #endif
1201 	return cmci_vector;
1202 }
1203 
#if DEVELOPMENT || DEBUG
extern void lapic_trigger_MC(void);
/*
 * Debug-only helper: deliberately perform a 64-bit-wide read of a local
 * APIC MMIO register to provoke a machine check (presumably because APIC
 * registers only accept 32-bit accesses -- see comment below). The
 * volatile qualifiers keep the compiler from eliding the faulting access.
 */
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	dummy++;
}
#endif
1214