xref: /xnu-10002.41.9/osfmk/i386/lapic_native.c (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
2  * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41 
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
44 
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #include <i386/bit_routines.h>
56 #if CONFIG_MCA
57 #include <i386/machine_check.h>
58 #endif
59 
60 #include <sys/kdebug.h>
61 
62 #if     MP_DEBUG
63 #define PAUSE           delay(1000000)
64 #define DBG(x...)       kprintf(x)
65 #else
66 #define DBG(x...)
67 #define PAUSE
68 #endif  /* MP_DEBUG */
69 
lapic_ops_table_t       *lapic_ops;     /* Lapic operations switch */

static vm_map_offset_t  lapic_pbase;    /* Physical base memory-mapped regs */
static vm_offset_t      lapic_vbase;    /* Virtual base memory-mapped regs */

/* Registered handlers, indexed by LAPIC-relative vector number */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Error-interrupt throttling state (see LAPIC_ERROR_INTERRUPT handling) */
static boolean_t lapic_errors_masked = FALSE;           /* error LVT masked after storm */
static uint64_t lapic_last_master_error = 0;            /* abstime of last BSP error */
static uint64_t lapic_error_time_threshold = 0;         /* storm window, abstime units */
static unsigned lapic_master_error_count = 0;           /* errors within current window */
static unsigned lapic_error_count_threshold = 5;        /* errors tolerated per window */
static boolean_t lapic_dont_panic = FALSE;              /* boot-arg: log instead of panic */
int lapic_max_interrupt_cpunum = 0;     /* highest cpu number eligible for ExtINT routing */
long lapic_icr_pending_timeouts = 0;    /* count of ICR pending-bit wait timeouts (debug) */

/* APIC operating mode, recorded across sleep/wake for consistency checks */
typedef enum {
	APIC_MODE_UNKNOWN = 0,
	APIC_MODE_XAPIC = 1,
	APIC_MODE_X2APIC = 2
} apic_mode_t;

static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN;
96 
#ifdef MP_DEBUG
/*
 * Debug aid: print every populated entry of the cpu<->APIC-id maps.
 */
void
lapic_cpu_map_dump(void)
{
	int     i;

	/* cpu number -> local APIC id */
	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1) {
			continue;       /* slot unused */
		}
		kprintf("cpu_to_lapic[%d]: %d\n",
		    i, cpu_to_lapic[i]);
	}
	/* local APIC id -> cpu number */
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1) {
			continue;       /* slot unused */
		}
		kprintf("lapic_to_cpu[%d]: %d\n",
		    i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */
119 
/*
 * Map the local APIC's MMIO register page (at lapic_pbase) into kernel
 * virtual space at lapic_vbase.  Idempotent: a non-zero lapic_vbase means
 * the mapping already exists and nothing is done.
 */
static void
map_local_apic(void)
{
	kern_return_t   kr;

	if (lapic_vbase == 0) {
		/* Reserve VA only (KMA_VAONLY); the physical page is entered below. */
		kmem_alloc(kernel_map, &lapic_vbase, round_page(LAPIC_SIZE),
		    KMA_PERMANENT | KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY,
		    VM_KERN_MEMORY_IOKIT);

		/*
		 * Map in the local APIC non-cacheable, as recommended by Intel
		 * in section 8.4.1 of the "System Programming Guide".
		 * In fact, this is redundant because EFI will have assigned an
		 * MTRR physical range containing the local APIC's MMIO space as
		 * UC and this will override the default PAT setting.
		 */
		kr = pmap_enter(pmap_kernel(),
		    lapic_vbase,
		    (ppnum_t) i386_btop(lapic_pbase),
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    VM_WIMG_IO,
		    TRUE);

		assert(kr == KERN_SUCCESS);
	}
}
148 
/*
 * Put the local APIC into legacy xAPIC mode and set the default
 * flat delivery model / logical destination.
 */
static void
legacy_init(void)
{
	uint32_t        lo, hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) {
		/*
		 * If we're already in x2APIC mode, we MUST disable the local APIC
		 * before transitioning back to legacy APIC mode.
		 */
		lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED);
		/* First write: APIC fully disabled; second write: re-enable as xAPIC. */
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE);
	}
	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
171 
172 
173 static uint32_t
legacy_read(lapic_register_t reg)174 legacy_read(lapic_register_t reg)
175 {
176 	return *LAPIC_MMIO(reg);
177 }
178 
/* xAPIC mode: write a local APIC register with a 32-bit MMIO store. */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
184 
185 static uint64_t
legacy_read_icr(void)186 legacy_read_icr(void)
187 {
188 	return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
189 }
190 
/*
 * xAPIC mode: program the destination (high ICR word) first; the write to
 * the low ICR word is what actually dispatches the IPI.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
197 
/* Operations vector used when the local APIC runs in legacy xAPIC (MMIO) mode */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
205 
/* TRUE when the local APIC is operated in x2APIC (MSR-based) mode */
boolean_t is_x2apic = FALSE;
207 
/*
 * Switch the local APIC into x2APIC mode (if it isn't already) and seed
 * the per-cpu soft copy of the timer LVT used by x2apic_read().
 */
static void
x2apic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
		/* Set the EXTD bit to transition xAPIC -> x2APIC. */
		lo |= MSR_IA32_APIC_BASE_EXTENDED;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
		kprintf("x2APIC mode enabled\n");
		/* Cache the timer LVT so reads can avoid APIC access (see x2apic_read). */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
223 
224 static uint32_t
x2apic_read(lapic_register_t reg)225 x2apic_read(lapic_register_t reg)
226 {
227 	uint32_t        lo;
228 	uint32_t        hi;
229 
230 	if (LVT_TIMER == reg) {
231 		// avoid frequent APIC access VM-exit
232 		return current_cpu_datap()->cpu_soft_apic_lvt_timer;
233 	}
234 	rdmsr(LAPIC_MSR(reg), lo, hi);
235 	return lo;
236 }
237 
238 static void
x2apic_write(lapic_register_t reg,uint32_t value)239 x2apic_write(lapic_register_t reg, uint32_t value)
240 {
241 	if (LVT_TIMER == reg) {
242 		current_cpu_datap()->cpu_soft_apic_lvt_timer = value;
243 	}
244 	wrmsr(LAPIC_MSR(reg), value, 0);
245 }
246 
247 static uint64_t
x2apic_read_icr(void)248 x2apic_read_icr(void)
249 {
250 	return rdmsr64(LAPIC_MSR(ICR));
251 }
252 
253 static void
x2apic_write_icr(uint32_t dst,uint32_t cmd)254 x2apic_write_icr(uint32_t dst, uint32_t cmd)
255 {
256 	wrmsr(LAPIC_MSR(ICR), cmd, dst);
257 }
258 
/* Operations vector used when the local APIC runs in x2APIC (MSR) mode */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
266 
267 /*
268  * Used by APs to determine their APIC IDs; assumes master CPU has initialized
269  * the local APIC interfaces.
270  */
271 uint32_t
lapic_safe_apicid(void)272 lapic_safe_apicid(void)
273 {
274 	uint32_t        lo;
275 	uint32_t        hi;
276 	boolean_t       is_lapic_enabled, is_local_x2apic;
277 
278 	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
279 	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
280 	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
281 
282 	if (is_lapic_enabled && is_local_x2apic) {
283 		return x2apic_read(ID);
284 	} else if (is_lapic_enabled) {
285 		return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
286 	} else {
287 		panic("Unknown Local APIC state!");
288 		/*NORETURN*/
289 	}
290 }
291 
/*
 * (Re)establish the APIC mode selected at boot (is_x2apic), transitioning
 * the hardware between legacy xAPIC and x2APIC if it currently differs.
 *
 * for_wake: true when called on wake from sleep; enables a consistency
 * check of the resulting mode against apic_mode_before_sleep.
 */
static void
lapic_reinit(bool for_wake)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;
	boolean_t       is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	/*
	 * If we're configured for x2apic mode and we're being asked to transition
	 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
	 * asked to transition to x2apic mode, call LAPIC_INIT().
	 */
	if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) {
		LAPIC_INIT();
		/* Now re-read after LAPIC_INIT() */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
		is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	}

	if ((!is_lapic_enabled && !is_local_x2apic)) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If we did not select the same APIC mode as we had before sleep, flag
	 * that as an error (and panic on debug/development kernels).  Note that
	 * we might get here with for_wake == true for the first boot case.  In
	 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
	 * slept yet), so we do not need to do any APIC checks.
	 */
	if (for_wake &&
	    ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) ||
	    (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) {
		kprintf("Inconsistent APIC state after wake (was %d before sleep, "
		    "now is %d)", apic_mode_before_sleep,
		    is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC);
#if DEBUG || DEVELOPMENT
		kprintf("HALTING.\n");
		/*
		 * Unfortunately, we cannot safely panic here because the
		 * executing CPU might not be fully initialized.  The best
		 * we can do is just print a message to the console and
		 * halt.
		 */
		asm volatile ("cli; hlt;" ::: "memory");
#endif
	}

	if (is_local_x2apic) {
		/* ensure the soft copy is up-to-date */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
354 
355 void
lapic_init_slave(void)356 lapic_init_slave(void)
357 {
358 	lapic_reinit(false);
359 #if DEBUG || DEVELOPMENT
360 	if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) {
361 		panic("Calling lapic_init_slave() on the boot processor");
362 	}
363 #endif
364 }
365 
/*
 * Boot-processor local APIC initialization: examine the firmware-provided
 * APIC state, choose xAPIC or x2APIC operation, map the MMIO window if
 * needed, initialize the APIC, and seed the cpu<->APIC-id maps.
 * Panics on unexpected firmware state (non-BSP, disabled APIC, old version).
 */
void
lapic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic         = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overridden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/*
		 * If no x2apic boot-arg was set and if we're running under a VMM,
		 * autoenable x2APIC mode.
		 */
		if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE &&
		    cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) {
			is_x2apic = TRUE;
		}
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	/* Select the MSR-based or MMIO-based operations vector. */
	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	if (lapic_pbase != 0) {
		/*
		 * APs might need to consult the local APIC via the MMIO interface
		 * to get their APIC IDs.
		 */
		map_local_apic();
	} else if (!is_x2apic) {
		panic("Local APIC physical address was not set.");
	}

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map(lapic_safe_apicid(), 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
431 
432 
/*
 * Read the Error Status Register.  The ESR is a write-read register:
 * a dummy write latches the accumulated error bits for the read.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
440 
/*
 * Clear the Error Status Register.  Written twice: the ESR is a
 * write-read register, so the first write latches pending errors and
 * the second clears them.
 */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
447 
/* Delivery-mode names, indexed by the LVT DM field value (used by lapic_dump) */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* Timer-mode names, indexed by the LVT TMR field value (used by lapic_dump) */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
465 
/*
 * Dump the local APIC register state via kprintf (debug aid):
 * IDs, priorities, every LVT entry, the timer, ESR, and the
 * TMR/IRR/ISR bit arrays.
 */
void
lapic_dump(void)
{
	int     i;

/* Field-extraction helpers over LAPIC_READ, scoped to this dump routine */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    lapic_safe_apicid(),
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; report 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI:    Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    &  LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	/* Column-index header for the 256-bit register dumps below */
	kprintf("       ");
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
568 
569 boolean_t
lapic_probe(void)570 lapic_probe(void)
571 {
572 	uint32_t        lo;
573 	uint32_t        hi;
574 
575 	if (cpuid_features() & CPUID_FEATURE_APIC) {
576 		return TRUE;
577 	}
578 
579 	if (cpuid_family() == 6 || cpuid_family() == 15) {
580 		/*
581 		 * Mobile Pentiums:
582 		 * There may be a local APIC which wasn't enabled by BIOS.
583 		 * So we try to enable it explicitly.
584 		 */
585 		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
586 		lo &= ~MSR_IA32_APIC_BASE_BASE;
587 		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
588 		lo |= MSR_IA32_APIC_BASE_ENABLE;
589 		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
590 
591 		/*
592 		 * Re-initialize cpu features info and re-check.
593 		 */
594 		cpuid_set_info();
595 		/* We expect this codepath will never be traversed
596 		 * due to EFI enabling the APIC. Reducing the APIC
597 		 * interrupt base dynamically is not supported.
598 		 */
599 		if (cpuid_features() & CPUID_FEATURE_APIC) {
600 			printf("Local APIC discovered and enabled\n");
601 			lapic_os_enabled = TRUE;
602 			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
603 			return TRUE;
604 		}
605 	}
606 
607 	return FALSE;
608 }
609 
/*
 * Shut down the local APIC: mask all LVT sources, software-disable the
 * APIC, and clear the hardware enable bit.  Only acts if the OS (not the
 * BIOS) enabled the APIC.  When called for sleep, records the current
 * mode so lapic_reinit() can verify consistency on wake.
 */
void
lapic_shutdown(bool for_sleep)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	if (for_sleep == true) {
		apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC);
	}

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
655 
656 boolean_t
cpu_can_exit(int cpu)657 cpu_can_exit(int cpu)
658 {
659 	return cpu > lapic_max_interrupt_cpunum;
660 }
661 
/*
 * Configure this cpu's local APIC for normal operation: select the boot
 * mode via lapic_reinit(), enable the APIC, and program all LVT entries
 * (ExtINT on low-numbered cpus, timer, perfmon, thermal, CMCI, error).
 * On cpu 0 the error-storm thresholds and boot-args are parsed first.
 */
void
lapic_configure(bool for_wake)
{
	int     value;

	/* One-time (cpu 0) setup of error-throttling window and boot-args */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	if (cpu_number() == 0) {
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/*
	 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
	 * but firmware configured the Legacy APIC):
	 */
	lapic_reinit(for_wake);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	/* Software-enable the APIC with the spurious vector */
	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/* Enable the error LVT unless errors were masked on the master cpu */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
721 
722 void
lapic_set_timer(boolean_t interrupt_unmasked,lapic_timer_mode_t mode,lapic_timer_divide_t divisor,lapic_timer_count_t initial_count)723 lapic_set_timer(
724 	boolean_t               interrupt_unmasked,
725 	lapic_timer_mode_t      mode,
726 	lapic_timer_divide_t    divisor,
727 	lapic_timer_count_t     initial_count)
728 {
729 	uint32_t        timer_vector;
730 
731 	mp_disable_preemption();
732 	timer_vector = LAPIC_READ(LVT_TIMER);
733 	timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);
734 	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
735 	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
736 	LAPIC_WRITE(LVT_TIMER, timer_vector);
737 	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
738 	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
739 	mp_enable_preemption();
740 }
741 
742 void
lapic_config_timer(boolean_t interrupt_unmasked,lapic_timer_mode_t mode,lapic_timer_divide_t divisor)743 lapic_config_timer(
744 	boolean_t               interrupt_unmasked,
745 	lapic_timer_mode_t      mode,
746 	lapic_timer_divide_t    divisor)
747 {
748 	uint32_t        timer_vector;
749 
750 	mp_disable_preemption();
751 	timer_vector = LAPIC_READ(LVT_TIMER);
752 	timer_vector &= ~(LAPIC_LVT_MASKED |
753 	    LAPIC_LVT_PERIODIC |
754 	    LAPIC_LVT_TSC_DEADLINE);
755 	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
756 	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
757 	LAPIC_WRITE(LVT_TIMER, timer_vector);
758 	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
759 	mp_enable_preemption();
760 }
761 
762 /*
763  * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
764  */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t        timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	/* Switch the timer LVT to TSC-deadline mode, leaving it unmasked. */
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG */
	/* Arm with a far-future deadline until the MSR reads back non-zero,
	 * then disarm; this confirms the mode switch has taken effect. */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
787 
788 void
lapic_set_timer_fast(lapic_timer_count_t initial_count)789 lapic_set_timer_fast(
790 	lapic_timer_count_t     initial_count)
791 {
792 	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
793 	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
794 }
795 
/* Arm the TSC-deadline timer for the given TSC value (0 disarms). */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
802 
/* Return the currently programmed TSC deadline (0 if disarmed). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
808 
809 void
lapic_get_timer(lapic_timer_mode_t * mode,lapic_timer_divide_t * divisor,lapic_timer_count_t * initial_count,lapic_timer_count_t * current_count)810 lapic_get_timer(
811 	lapic_timer_mode_t      *mode,
812 	lapic_timer_divide_t    *divisor,
813 	lapic_timer_count_t     *initial_count,
814 	lapic_timer_count_t     *current_count)
815 {
816 	mp_disable_preemption();
817 	if (mode) {
818 		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
819 		    periodic : one_shot;
820 	}
821 	if (divisor) {
822 		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
823 	}
824 	if (initial_count) {
825 		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
826 	}
827 	if (current_count) {
828 		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
829 	}
830 	mp_enable_preemption();
831 }
832 
/* Signal end-of-interrupt to the local APIC by writing the EOI register. */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
838 
/* Externally visible wrapper for the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
844 
/* Rewrite the perfmon LVT with its vector and the mask bit cleared. */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
850 
851 void
lapic_set_perfcnt_interrupt_mask(boolean_t mask)852 lapic_set_perfcnt_interrupt_mask(boolean_t mask)
853 {
854 	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
855 	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
856 }
857 
/*
 * Register a handler for a local APIC interrupt.  Accepts either the
 * LAPIC-relative vector or one offset by lapic_interrupt_base.
 * Panics on an unrecognized vector.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	/* Normalize to a LAPIC-relative vector number */
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector",
		    vector, func);
	}
}
880 
/* Convenience wrapper: register the performance-counter (PMI) handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
886 
/*
 * Dispatch a local APIC interrupt.  interrupt_num is the absolute vector
 * number; it is translated to a LAPIC-relative vector first (negative
 * results are only serviced for the NMI pseudo-vector).  Returns 1 if
 * the interrupt was fully handled here, 0 if the platform expert should
 * handle it.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int     retval = 0;
	int     esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the LAPIC range: only the NMI pseudo-vector is serviced. */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				/* Within the storm window: count, and mask past threshold */
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				/* Outside the window: start a fresh window */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
	}
	break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
997 
/*
 * Recover local APIC timer state after returning from SMM, where a buggy
 * SMI handler may have consumed a timer interrupt without issuing an EOI.
 * Only acts when the OS enabled the APIC.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE) {
		return;
	}

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
1031 
1032 void
lapic_send_ipi(int cpu,int vector)1033 lapic_send_ipi(int cpu, int vector)
1034 {
1035 	boolean_t       state;
1036 
1037 	if (vector < lapic_interrupt_base) {
1038 		vector += lapic_interrupt_base;
1039 	}
1040 
1041 	state = ml_set_interrupts_enabled(FALSE);
1042 
1043 	/* X2APIC's ICR doesn't have a pending bit. */
1044 	if (!is_x2apic) {
1045 		/* Wait for pending outgoing send to complete */
1046 		while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
1047 			cpu_pause();
1048 		}
1049 	}
1050 
1051 	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
1052 
1053 	(void) ml_set_interrupts_enabled(state);
1054 }
1055 
/*
 * Send an NMI IPI to the given cpu.  Unlike lapic_send_ipi(), the wait
 * for a previously pending send is bounded, so an NMI (which may be
 * aimed at a wedged CPU) is never delayed indefinitely.
 */
void
lapic_send_nmi(int cpu)
{
	/* X2APIC's ICR has no delivery-status bit, so only xAPIC must wait. */
	if (!is_x2apic) {
		if (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
			uint64_t now = mach_absolute_time();
			/* Wait up to 10ms for the pending outgoing send (if any) to complete */
			/*
			 * NOTE(review): the bound compares a mach_absolute_time()
			 * delta directly against nanoseconds; assumes abstime
			 * units are nanoseconds on this platform — confirm.
			 */
			while ((LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) &&
			    (mach_absolute_time() - now) < (10 * NSEC_PER_MSEC)) {
				cpu_pause();
			}
		}
#if DEVELOPMENT || DEBUG
		/* Timed out with the send still pending: record it and press on. */
		if (__improbable(LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING)) {
			/* Since it's not safe to invoke printf here, kprintf and counting is the best we can do */
			kprintf("WARNING: Wait for lapic ICR pending bit timed-out!\n");
			atomic_incl((volatile long *)&lapic_icr_pending_timeouts, 1);
		}
#endif
	}

	/* Program the interrupt command register */
	/* The vector is ignored in this case--the target CPU will enter on the
	 * NMI vector.
	 */
	LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
	    LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI);
}
1084 
1085 /*
1086  * The following interfaces are privately exported to AICPM.
1087  */
1088 
1089 boolean_t
lapic_is_interrupt_pending(void)1090 lapic_is_interrupt_pending(void)
1091 {
1092 	int             i;
1093 
1094 	for (i = 0; i < 8; i += 1) {
1095 		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
1096 		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
1097 			return TRUE;
1098 		}
1099 	}
1100 
1101 	return FALSE;
1102 }
1103 
1104 boolean_t
lapic_is_interrupting(uint8_t vector)1105 lapic_is_interrupting(uint8_t vector)
1106 {
1107 	int             i;
1108 	int             bit;
1109 	uint32_t        irr;
1110 	uint32_t        isr;
1111 
1112 	i = vector / 32;
1113 	bit = 1 << (vector % 32);
1114 
1115 	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1116 	isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1117 
1118 	if ((irr | isr) & bit) {
1119 		return TRUE;
1120 	}
1121 
1122 	return FALSE;
1123 }
1124 
1125 void
lapic_interrupt_counts(uint64_t intrs[256])1126 lapic_interrupt_counts(uint64_t intrs[256])
1127 {
1128 	int             i;
1129 	int             j;
1130 	int             bit;
1131 	uint32_t        irr;
1132 	uint32_t        isr;
1133 
1134 	if (intrs == NULL) {
1135 		return;
1136 	}
1137 
1138 	for (i = 0; i < 8; i += 1) {
1139 		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1140 		isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1141 
1142 		if ((isr | irr) == 0) {
1143 			continue;
1144 		}
1145 
1146 		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
1147 			bit = (32 * i) + j;
1148 			if ((isr | irr) & (1 << j)) {
1149 				intrs[bit] += 1;
1150 			}
1151 		}
1152 	}
1153 }
1154 
1155 void
lapic_disable_timer(void)1156 lapic_disable_timer(void)
1157 {
1158 	uint32_t        lvt_timer;
1159 
1160 	/*
1161 	 * If we're in deadline timer mode,
1162 	 * simply clear the deadline timer, otherwise
1163 	 * mask the timer interrupt and clear the countdown.
1164 	 */
1165 	lvt_timer = LAPIC_READ(LVT_TIMER);
1166 	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
1167 		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
1168 	} else {
1169 		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
1170 		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
1171 		lvt_timer = LAPIC_READ(LVT_TIMER);
1172 	}
1173 }
1174 
1175 /* SPI returning the CMCI vector */
/*
 * Return the vector used for Corrected Machine Check Interrupts,
 * or 0 when CMCI is unsupported or not configured.
 */
uint8_t
lapic_get_cmci_vector(void)
{
#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		return LAPIC_VECTOR(CMCI);
	}
#endif
	return 0;
}
1188 
1189 #if DEVELOPMENT || DEBUG
1190 extern void lapic_trigger_MC(void);
1191 void
lapic_trigger_MC(void)1192 lapic_trigger_MC(void)
1193 {
1194 	/* A 64-bit access to any register will do it. */
1195 	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
1196 	dummy++;
1197 }
1198 #endif
1199