xref: /xnu-8020.101.4/osfmk/i386/lapic_native.c (revision e7776783b89a353188416a9a346c6cdb4928faad) !
1 /*
2  * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41 
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
44 
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #include <i386/bit_routines.h>
56 #if CONFIG_MCA
57 #include <i386/machine_check.h>
58 #endif
59 
60 #include <sys/kdebug.h>
61 
62 #if     MP_DEBUG
63 #define PAUSE           delay(1000000)
64 #define DBG(x...)       kprintf(x)
65 #else
66 #define DBG(x...)
67 #define PAUSE
68 #endif  /* MP_DEBUG */
69 
lapic_ops_table_t       *lapic_ops;     /* Lapic operations switch */

static vm_map_offset_t  lapic_pbase;    /* Physical base memory-mapped regs */
static vm_offset_t      lapic_vbase;    /* Virtual base memory-mapped regs */

/* Per-vector handler functions registered via lapic_set_intr_func() */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Error-interrupt throttling state (see LAPIC_ERROR_INTERRUPT handling) */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;
/* Highest CPU number eligible to take external (ExtINT) interrupts */
int lapic_max_interrupt_cpunum = 0;
long lapic_icr_pending_timeouts = 0;

/* Which APIC operating mode the CPU was in; recorded across sleep/wake */
typedef enum {
	APIC_MODE_UNKNOWN = 0,
	APIC_MODE_XAPIC = 1,
	APIC_MODE_X2APIC = 2
} apic_mode_t;

static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN;
96 
#ifdef MP_DEBUG
/* Debug aid: print the populated entries of the cpu<->lapic id maps */
void
lapic_cpu_map_dump(void)
{
	int     slot;

	for (slot = 0; slot < MAX_CPUS; slot++) {
		if (cpu_to_lapic[slot] != -1) {
			kprintf("cpu_to_lapic[%d]: %d\n",
			    slot, cpu_to_lapic[slot]);
		}
	}
	for (slot = 0; slot < MAX_LAPICIDS; slot++) {
		if (lapic_to_cpu[slot] != -1) {
			kprintf("lapic_to_cpu[%d]: %d\n",
			    slot, lapic_to_cpu[slot]);
		}
	}
}
#endif /* MP_DEBUG */
119 
/*
 * Map the local APIC register page (at lapic_pbase) into the kernel map.
 * Idempotent: once lapic_vbase is non-zero, subsequent calls do nothing.
 */
static void
map_local_apic(void)
{
	vm_map_offset_t lapic_vbase64;
	int             result;
	kern_return_t   kr;
	vm_map_entry_t  entry;

	if (lapic_vbase == 0) {
		lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
		result = vm_map_find_space(kernel_map,
		    &lapic_vbase64,
		    round_page(LAPIC_SIZE), 0,
		    VM_MAP_KERNEL_FLAGS_NONE,
		    VM_KERN_MEMORY_IOKIT,
		    &entry);
		/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
		 */
		lapic_vbase = (vm_offset_t) lapic_vbase64;
		if (result != KERN_SUCCESS) {
			panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
		}
		/* vm_map_find_space() returns with the map locked; release it */
		vm_map_unlock(kernel_map);

		/*
		 * Map in the local APIC non-cacheable, as recommended by Intel
		 * in section 8.4.1 of the "System Programming Guide".
		 * In fact, this is redundant because EFI will have assigned an
		 * MTRR physical range containing the local APIC's MMIO space as
		 * UC and this will override the default PAT setting.
		 */
		kr = pmap_enter(pmap_kernel(),
		    lapic_vbase,
		    (ppnum_t) i386_btop(lapic_pbase),
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    VM_WIMG_IO,
		    TRUE);

		assert(kr == KERN_SUCCESS);
	}
}
162 
/*
 * Put the local APIC into legacy (xAPIC/MMIO) mode and set the default
 * destination format.  Called via the legacy_ops switch.
 */
static void
legacy_init(void)
{
	uint32_t        lo, hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) {
		/*
		 * If we're already in x2APIC mode, we MUST disable the local APIC
		 * before transitioning back to legacy APIC mode.
		 * Hence the two-step write: first clear ENABLE+EXTENDED, then
		 * re-set ENABLE with EXTENDED left clear.
		 */
		lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE);
	}
	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
185 
186 
/* Read a local APIC register through the xAPIC MMIO window. */
static uint32_t
legacy_read(lapic_register_t reg)
{
	return *LAPIC_MMIO(reg);
}
192 
/* Write a local APIC register through the xAPIC MMIO window. */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
198 
199 static uint64_t
legacy_read_icr(void)200 legacy_read_icr(void)
201 {
202 	return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
203 }
204 
/*
 * Issue an IPI in xAPIC mode.  The destination must be written first;
 * the write to the low ICR word is what triggers the send.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
211 
/* Operations switch for legacy (xAPIC/MMIO) mode */
static lapic_ops_table_t legacy_ops = {
	legacy_init,            /* init */
	legacy_read,            /* read */
	legacy_write,           /* write */
	legacy_read_icr,        /* read_icr */
	legacy_write_icr        /* write_icr */
};
219 
boolean_t is_x2apic = FALSE;    /* TRUE once we commit to x2APIC mode */
221 
/*
 * Enable x2APIC (MSR-based) mode if not already enabled, and prime the
 * per-CPU soft copy of the timer LVT used by x2apic_read()/x2apic_write().
 */
static void
x2apic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
		lo |= MSR_IA32_APIC_BASE_EXTENDED;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
		kprintf("x2APIC mode enabled\n");
		/* Cache the timer LVT so reads can avoid the MSR access */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
237 
238 static uint32_t
x2apic_read(lapic_register_t reg)239 x2apic_read(lapic_register_t reg)
240 {
241 	uint32_t        lo;
242 	uint32_t        hi;
243 
244 	if (LVT_TIMER == reg) {
245 		// avoid frequent APIC access VM-exit
246 		return current_cpu_datap()->cpu_soft_apic_lvt_timer;
247 	}
248 	rdmsr(LAPIC_MSR(reg), lo, hi);
249 	return lo;
250 }
251 
252 static void
x2apic_write(lapic_register_t reg,uint32_t value)253 x2apic_write(lapic_register_t reg, uint32_t value)
254 {
255 	if (LVT_TIMER == reg) {
256 		current_cpu_datap()->cpu_soft_apic_lvt_timer = value;
257 	}
258 	wrmsr(LAPIC_MSR(reg), value, 0);
259 }
260 
/* In x2APIC mode the ICR is a single 64-bit MSR. */
static uint64_t
x2apic_read_icr(void)
{
	return rdmsr64(LAPIC_MSR(ICR));
}
266 
/*
 * Issue an IPI in x2APIC mode: the command goes in the low 32 bits of the
 * ICR MSR and the destination APIC id in the high 32 bits.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
272 
/* Operations switch for x2APIC (MSR-based) mode */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,            /* init */
	x2apic_read,            /* read */
	x2apic_write,           /* write */
	x2apic_read_icr,        /* read_icr */
	x2apic_write_icr        /* write_icr */
};
280 
281 /*
282  * Used by APs to determine their APIC IDs; assumes master CPU has initialized
283  * the local APIC interfaces.
284  */
/*
 * Return this CPU's APIC id regardless of whether the APIC is in xAPIC or
 * x2APIC mode, by consulting IA32_APIC_BASE rather than the cached globals.
 * Panics if the local APIC is disabled entirely.
 */
uint32_t
lapic_safe_apicid(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_lapic_enabled, is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	if (is_lapic_enabled && is_local_x2apic) {
		return x2apic_read(ID);
	} else if (is_lapic_enabled) {
		/* xAPIC mode: the id lives in the high byte of the ID register */
		return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
	} else {
		panic("Unknown Local APIC state!");
		/*NORETURN*/
	}
}
305 
/*
 * (Re)initialize the local APIC into the globally-selected mode (is_x2apic),
 * transitioning between xAPIC and x2APIC if the hardware state disagrees.
 * With for_wake == true, also cross-checks the resulting mode against the
 * mode recorded before sleep.
 */
static void
lapic_reinit(bool for_wake)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;  /* NOTE(review): set but never read below */
	boolean_t       is_lapic_enabled;
	boolean_t       is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	/*
	 * If we're configured for x2apic mode and we're being asked to transition
	 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
	 * asked to transition to x2apic mode, call LAPIC_INIT().
	 */
	if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) {
		LAPIC_INIT();
		/* Now re-read after LAPIC_INIT() */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
		is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	}

	if ((!is_lapic_enabled && !is_local_x2apic)) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If we did not select the same APIC mode as we had before sleep, flag
	 * that as an error (and panic on debug/development kernels).  Note that
	 * we might get here with for_wake == true for the first boot case.  In
	 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
	 * slept yet), so we do not need to do any APIC checks.
	 */
	if (for_wake &&
	    ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) ||
	    (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) {
		kprintf("Inconsistent APIC state after wake (was %d before sleep, "
		    "now is %d)", apic_mode_before_sleep,
		    is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC);
#if DEBUG || DEVELOPMENT
		kprintf("HALTING.\n");
		/*
		 * Unfortunately, we cannot safely panic here because the
		 * executing CPU might not be fully initialized.  The best
		 * we can do is just print a message to the console and
		 * halt.
		 */
		asm volatile ("cli; hlt;" ::: "memory");
#endif
	}

	if (is_local_x2apic) {
		/* ensure the soft copy is up-to-date */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
368 
369 void
lapic_init_slave(void)370 lapic_init_slave(void)
371 {
372 	lapic_reinit(false);
373 #if DEBUG || DEVELOPMENT
374 	if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) {
375 		panic("Calling lapic_init_slave() on the boot processor");
376 	}
377 #endif
378 }
379 
/*
 * Boot-processor local APIC initialization: decide between xAPIC and x2APIC
 * mode, select the matching ops table, map the MMIO page if needed, run the
 * mode-specific init, and seed the cpu<->lapic id map with the BSP.
 */
void
lapic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic         = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overriden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/*
		 * If no x2apic boot-arg was set and if we're running under a VMM,
		 * autoenable x2APIC mode.
		 */
		if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE &&
		    cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) {
			is_x2apic = TRUE;
		}
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	if (lapic_pbase != 0) {
		/*
		 * APs might need to consult the local APIC via the MMIO interface
		 * to get their APIC IDs.
		 */
		map_local_apic();
	} else if (!is_x2apic) {
		panic("Local APIC physical address was not set.");
	}

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	/* Require an integrated APIC (version >= 0x14) */
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map(lapic_safe_apicid(), 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
445 
446 
/*
 * Read the Error Status Register.  A write must precede the read to latch
 * the currently-accumulated errors into the register.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
454 
/* Clear the ESR; two back-to-back writes flush both latched and pending state. */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
461 
/* LVT delivery-mode field names, indexed by the 3-bit DM value */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* Timer-mode field names, indexed by the 2-bit TMR value */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
479 
/*
 * Dump the current CPU's local APIC state to the kernel log: LVT entries,
 * priorities, timer configuration, ESR, and the TMR/IRR/ISR bitmaps.
 * Used from the error-interrupt path and for debugging.
 */
void
lapic_dump(void)
{
	int     i;

/* Field-extraction helpers over LAPIC_READ(), used only by this dump */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    lapic_safe_apicid(),
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; report 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI:    Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    &  LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	/* Header row of hex digit labels for the 256-bit register dumps below */
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
582 
583 boolean_t
lapic_probe(void)584 lapic_probe(void)
585 {
586 	uint32_t        lo;
587 	uint32_t        hi;
588 
589 	if (cpuid_features() & CPUID_FEATURE_APIC) {
590 		return TRUE;
591 	}
592 
593 	if (cpuid_family() == 6 || cpuid_family() == 15) {
594 		/*
595 		 * Mobile Pentiums:
596 		 * There may be a local APIC which wasn't enabled by BIOS.
597 		 * So we try to enable it explicitly.
598 		 */
599 		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
600 		lo &= ~MSR_IA32_APIC_BASE_BASE;
601 		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
602 		lo |= MSR_IA32_APIC_BASE_ENABLE;
603 		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
604 
605 		/*
606 		 * Re-initialize cpu features info and re-check.
607 		 */
608 		cpuid_set_info();
609 		/* We expect this codepath will never be traversed
610 		 * due to EFI enabling the APIC. Reducing the APIC
611 		 * interrupt base dynamically is not supported.
612 		 */
613 		if (cpuid_features() & CPUID_FEATURE_APIC) {
614 			printf("Local APIC discovered and enabled\n");
615 			lapic_os_enabled = TRUE;
616 			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
617 			return TRUE;
618 		}
619 	}
620 
621 	return FALSE;
622 }
623 
/*
 * Quiesce the local APIC prior to sleep or shutdown: mask all LVT entries,
 * software-disable the APIC, and clear the hardware enable bit.  Only does
 * the teardown when the OS (not the BIOS) enabled the APIC.
 */
void
lapic_shutdown(bool for_sleep)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	if (for_sleep == true) {
		/* Record the mode so lapic_reinit() can cross-check after wake */
		apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC);
	}

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked (only relevant on CPUs that take external interrupts) */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
669 
670 boolean_t
cpu_can_exit(int cpu)671 cpu_can_exit(int cpu)
672 {
673 	return cpu > lapic_max_interrupt_cpunum;
674 }
675 
/*
 * Configure this CPU's local APIC for normal operation: reinitialize the
 * mode, open the task priority, enable the APIC, and program every LVT
 * entry.  Called at boot and on wake (for_wake == true).
 */
void
lapic_configure(bool for_wake)
{
	int     value;

	/* One-time (boot CPU) setup of the error-throttling parameters */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	if (cpu_number() == 0) {
		/* Default: steer external interrupts to both HT siblings of core 0 */
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/*
	 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
	 * but firmware configured the Legacy APIC):
	 */
	lapic_reinit(for_wake);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/*
	 * (Re)enable the error vector, except on the master CPU when errors
	 * have been deliberately masked by the throttling logic.
	 */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
735 
/*
 * Program the APIC timer: mask state, mode (one-shot vs periodic),
 * divide configuration, and initial count.  Preemption is disabled so
 * all writes hit the same CPU's APIC; the initial-count write (last)
 * is what arms the timer.
 */
void
lapic_set_timer(
	boolean_t               interrupt_unmasked,
	lapic_timer_mode_t      mode,
	lapic_timer_divide_t    divisor,
	lapic_timer_count_t     initial_count)
{
	uint32_t        timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
	mp_enable_preemption();
}
755 
/*
 * Configure the APIC timer's mode, mask state and divisor without arming it
 * (no initial-count write; see lapic_set_timer_fast() for that).
 */
void
lapic_config_timer(
	boolean_t               interrupt_unmasked,
	lapic_timer_mode_t      mode,
	lapic_timer_divide_t    divisor)
{
	uint32_t        timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	/* Clear mask and both mode bits, including TSC-deadline */
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC |
	    LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}
775 
776 /*
777  * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
778  */
/*
 * Switch the APIC timer into TSC-deadline mode, unmasked.  After changing
 * the LVT mode bits, an MFENCE-equivalent serialization is required before
 * the first IA32_TSC_DEADLINE write is guaranteed to be honored; the
 * write-until-nonzero loop below performs that serialization.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t        timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG */
	do {
		/* Arm far in the future, re-trying until the MSR sticks */
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	/* Disarm; callers will set a real deadline later */
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
801 
/*
 * Fast-path arm of a previously-configured one-shot timer: unmask the LVT
 * and write the initial count.  Assumes mode/divisor were already set.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t     initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
809 
/* Arm (or disarm, with 0) the TSC-deadline timer via its MSR. */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
816 
/* Return the currently-armed TSC deadline (0 if disarmed). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
822 
823 void
lapic_get_timer(lapic_timer_mode_t * mode,lapic_timer_divide_t * divisor,lapic_timer_count_t * initial_count,lapic_timer_count_t * current_count)824 lapic_get_timer(
825 	lapic_timer_mode_t      *mode,
826 	lapic_timer_divide_t    *divisor,
827 	lapic_timer_count_t     *initial_count,
828 	lapic_timer_count_t     *current_count)
829 {
830 	mp_disable_preemption();
831 	if (mode) {
832 		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
833 		    periodic : one_shot;
834 	}
835 	if (divisor) {
836 		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
837 	}
838 	if (initial_count) {
839 		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
840 	}
841 	if (current_count) {
842 		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
843 	}
844 	mp_enable_preemption();
845 }
846 
/* Signal end-of-interrupt to the local APIC (any write value is accepted). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
852 
/* Exported wrapper around the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
858 
/* Re-enable the performance-counter LVT (writes the vector with mask clear). */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
864 
865 void
lapic_set_perfcnt_interrupt_mask(boolean_t mask)866 lapic_set_perfcnt_interrupt_mask(boolean_t mask)
867 {
868 	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
869 	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
870 }
871 
/*
 * Register a handler for a local APIC interrupt vector.  Accepts either a
 * raw vector (rebased here) or an already-rebased local vector number.
 * Panics on vectors that are not locally dispatched.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector",
		    vector, func);
	}
}
894 
/* Convenience wrapper: register the performance-monitor interrupt handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
900 
/*
 * Dispatch a local APIC interrupt.  Returns 1 if the interrupt was handled
 * here (EOI issued as needed), 0 if it should be passed up to the platform
 * expert.  interrupt_num arrives as an absolute vector and is rebased.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int     retval = 0;
	int     esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the base, only the NMI pseudo-vector is dispatched */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				/* Too many errors too quickly: mask the vector */
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				/* Outside the window: restart the throttling counter */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
		/* Intentionally empty: vector reserved, nothing to do */
	}
	break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
1011 
1012 void
lapic_smm_restore(void)1013 lapic_smm_restore(void)
1014 {
1015 	boolean_t state;
1016 
1017 	if (lapic_os_enabled == FALSE) {
1018 		return;
1019 	}
1020 
1021 	state = ml_set_interrupts_enabled(FALSE);
1022 
1023 	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
1024 		/*
1025 		 * Bogus SMI handler enables interrupts but does not know about
1026 		 * local APIC interrupt sources. When APIC timer counts down to
1027 		 * zero while in SMM, local APIC will end up waiting for an EOI
1028 		 * but no interrupt was delivered to the OS.
1029 		 */
1030 		_lapic_end_of_interrupt();
1031 
1032 		/*
1033 		 * timer is one-shot, trigger another quick countdown to trigger
1034 		 * another timer interrupt.
1035 		 */
1036 		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
1037 			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
1038 		}
1039 
1040 		kprintf("lapic_smm_restore\n");
1041 	}
1042 
1043 	ml_set_interrupts_enabled(state);
1044 }
1045 
1046 void
lapic_send_ipi(int cpu,int vector)1047 lapic_send_ipi(int cpu, int vector)
1048 {
1049 	boolean_t       state;
1050 
1051 	if (vector < lapic_interrupt_base) {
1052 		vector += lapic_interrupt_base;
1053 	}
1054 
1055 	state = ml_set_interrupts_enabled(FALSE);
1056 
1057 	/* X2APIC's ICR doesn't have a pending bit. */
1058 	if (!is_x2apic) {
1059 		/* Wait for pending outgoing send to complete */
1060 		while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
1061 			cpu_pause();
1062 		}
1063 	}
1064 
1065 	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
1066 
1067 	(void) ml_set_interrupts_enabled(state);
1068 }
1069 
1070 void
lapic_send_nmi(int cpu)1071 lapic_send_nmi(int cpu)
1072 {
1073 	if (!is_x2apic) {
1074 		if (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
1075 			uint64_t now = mach_absolute_time();
1076 			/* Wait up to 10ms for the pending outgoing send (if any) to complete */
1077 			while ((LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) &&
1078 			    (mach_absolute_time() - now) < (10 * NSEC_PER_MSEC)) {
1079 				cpu_pause();
1080 			}
1081 		}
1082 #if DEVELOPMENT || DEBUG
1083 		if (__improbable(LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING)) {
1084 			/* Since it's not safe to invoke printf here, kprintf and counting is the best we can do */
1085 			kprintf("WARNING: Wait for lapic ICR pending bit timed-out!\n");
1086 			atomic_incl((volatile long *)&lapic_icr_pending_timeouts, 1);
1087 		}
1088 #endif
1089 	}
1090 
1091 	/* Program the interrupt command register */
1092 	/* The vector is ignored in this case--the target CPU will enter on the
1093 	 * NMI vector.
1094 	 */
1095 	LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
1096 	    LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI);
1097 }
1098 
1099 /*
1100  * The following interfaces are privately exported to AICPM.
1101  */
1102 
1103 boolean_t
lapic_is_interrupt_pending(void)1104 lapic_is_interrupt_pending(void)
1105 {
1106 	int             i;
1107 
1108 	for (i = 0; i < 8; i += 1) {
1109 		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
1110 		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
1111 			return TRUE;
1112 		}
1113 	}
1114 
1115 	return FALSE;
1116 }
1117 
1118 boolean_t
lapic_is_interrupting(uint8_t vector)1119 lapic_is_interrupting(uint8_t vector)
1120 {
1121 	int             i;
1122 	int             bit;
1123 	uint32_t        irr;
1124 	uint32_t        isr;
1125 
1126 	i = vector / 32;
1127 	bit = 1 << (vector % 32);
1128 
1129 	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1130 	isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1131 
1132 	if ((irr | isr) & bit) {
1133 		return TRUE;
1134 	}
1135 
1136 	return FALSE;
1137 }
1138 
1139 void
lapic_interrupt_counts(uint64_t intrs[256])1140 lapic_interrupt_counts(uint64_t intrs[256])
1141 {
1142 	int             i;
1143 	int             j;
1144 	int             bit;
1145 	uint32_t        irr;
1146 	uint32_t        isr;
1147 
1148 	if (intrs == NULL) {
1149 		return;
1150 	}
1151 
1152 	for (i = 0; i < 8; i += 1) {
1153 		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1154 		isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1155 
1156 		if ((isr | irr) == 0) {
1157 			continue;
1158 		}
1159 
1160 		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
1161 			bit = (32 * i) + j;
1162 			if ((isr | irr) & (1 << j)) {
1163 				intrs[bit] += 1;
1164 			}
1165 		}
1166 	}
1167 }
1168 
/*
 * Disarm the local APIC timer, handling both TSC-deadline mode and
 * countdown (one-shot/periodic) mode.
 */
void
lapic_disable_timer(void)
{
	uint32_t        lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		/* Deadline mode: writing 0 to the deadline MSR disarms the timer. */
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		/* Countdown mode: mask the LVT entry and zero the initial count. */
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/*
		 * NOTE(review): the value of this read-back is never used —
		 * presumably it forces the preceding MMIO writes to post.
		 * Confirm before removing this apparent dead store.
		 */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
1188 
/*
 * SPI returning the CMCI vector.
 * Returns the corrected machine-check interrupt vector, or 0 when CMCI
 * is not present (or MCA support is compiled out).
 */
uint8_t
lapic_get_cmci_vector(void)
{
#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		return LAPIC_VECTOR(CMCI);
	}
#endif
	return 0;
}
1202 
#if DEVELOPMENT || DEBUG
extern void lapic_trigger_MC(void);
/*
 * Debug-only hook: provoke a machine check by performing a 64-bit read
 * of an APIC MMIO register.
 */
void
lapic_trigger_MC(void)
{
	volatile uint64_t scratch;

	/* A 64-bit access to any register will do it. */
	scratch = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	scratch++;
}
#endif
1213