xref: /xnu-11215.41.3/osfmk/i386/lapic_native.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41 
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern_xnu.h>
44 
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #include <i386/bit_routines.h>
56 #if CONFIG_MCA
57 #include <i386/machine_check.h>
58 #endif
59 
60 #include <sys/kdebug.h>
61 
62 #if     MP_DEBUG
63 #define PAUSE           delay(1000000)
64 #define DBG(x...)       kprintf(x)
65 #else
66 #define DBG(x...)
67 #define PAUSE
68 #endif  /* MP_DEBUG */
69 
lapic_ops_table_t       *lapic_ops;     /* Lapic operations switch */

static vm_map_offset_t  lapic_pbase;    /* Physical base memory-mapped regs */
static vm_offset_t      lapic_vbase;    /* Virtual base memory-mapped regs */

/* Registered handlers, indexed by LAPIC-relative interrupt number */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Error-interrupt throttling state (see lapic_interrupt()'s ERROR case) */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;
/* Highest cpu number still required to service external interrupts */
int lapic_max_interrupt_cpunum = 0;
/* Count of ICR delivery-pending wait timeouts (see lapic_send_nmi()) */
long lapic_icr_pending_timeouts = 0;

/* Operating mode of the local APIC, recorded across sleep/wake */
typedef enum {
	APIC_MODE_UNKNOWN = 0,
	APIC_MODE_XAPIC = 1,
	APIC_MODE_X2APIC = 2
} apic_mode_t;

static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN;
96 
97 #ifdef MP_DEBUG
98 void
lapic_cpu_map_dump(void)99 lapic_cpu_map_dump(void)
100 {
101 	int     i;
102 
103 	for (i = 0; i < MAX_CPUS; i++) {
104 		if (cpu_to_lapic[i] == -1) {
105 			continue;
106 		}
107 		kprintf("cpu_to_lapic[%d]: %d\n",
108 		    i, cpu_to_lapic[i]);
109 	}
110 	for (i = 0; i < MAX_LAPICIDS; i++) {
111 		if (lapic_to_cpu[i] == -1) {
112 			continue;
113 		}
114 		kprintf("lapic_to_cpu[%d]: %d\n",
115 		    i, lapic_to_cpu[i]);
116 	}
117 }
118 #endif /* MP_DEBUG */
119 
/*
 * Map the local APIC's MMIO register page into kernel virtual address
 * space.  Idempotent: does nothing once lapic_vbase is set.  Requires
 * lapic_pbase to have been initialized (from MSR_IA32_APIC_BASE).
 */
static void
map_local_apic(void)
{
	kern_return_t   kr;

	if (lapic_vbase == 0) {
		/* Reserve virtual space only (KMA_VAONLY); KMA_NOFAIL means
		 * the allocation cannot return failure. */
		kmem_alloc(kernel_map, &lapic_vbase, round_page(LAPIC_SIZE),
		    KMA_PERMANENT | KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY,
		    VM_KERN_MEMORY_IOKIT);

		/*
		 * Map in the local APIC non-cacheable, as recommended by Intel
		 * in section 8.4.1 of the "System Programming Guide".
		 * In fact, this is redundant because EFI will have assigned an
		 * MTRR physical range containing the local APIC's MMIO space as
		 * UC and this will override the default PAT setting.
		 */
		kr = pmap_enter(pmap_kernel(),
		    lapic_vbase,
		    (ppnum_t) i386_btop(lapic_pbase),
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    VM_WIMG_IO,
		    TRUE,
		    PMAP_MAPPING_TYPE_INFER);

		assert(kr == KERN_SUCCESS);
	}
}
149 
/*
 * Configure the local APIC for legacy (xAPIC/MMIO) operation, downgrading
 * from x2APIC mode first if necessary, then program flat logical
 * destination addressing for this CPU.
 */
static void
legacy_init(void)
{
	uint32_t        lo, hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) {
		/*
		 * If we're already in x2APIC mode, we MUST disable the local APIC
		 * before transitioning back to legacy APIC mode.
		 */
		lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED);
		/* First write: disabled, non-extended; second: re-enable in xAPIC mode */
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE);
	}
	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
172 
173 
/* Read a local APIC register through its MMIO window (xAPIC mode). */
static uint32_t
legacy_read(lapic_register_t reg)
{
	return *LAPIC_MMIO(reg);
}
179 
/* Write a local APIC register through its MMIO window (xAPIC mode). */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
185 
186 static uint64_t
legacy_read_icr(void)187 legacy_read_icr(void)
188 {
189 	return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
190 }
191 
/*
 * Issue an ICR command.  The destination (ICRD) must be written before
 * the low word, since writing ICR initiates delivery.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
198 
/* Operations vector used when the APIC runs in legacy xAPIC (MMIO) mode */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
206 
boolean_t is_x2apic = FALSE;    /* TRUE when operating in x2APIC (MSR) mode */
208 
/*
 * Enable x2APIC mode if not already active, and seed the per-cpu software
 * copy of LVT_TIMER used to avoid APIC-access VM-exits (see x2apic_read).
 */
static void
x2apic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
		lo |= MSR_IA32_APIC_BASE_EXTENDED;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
		kprintf("x2APIC mode enabled\n");
		/* Cache the current LVT_TIMER value in the per-cpu soft copy */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
224 
225 static uint32_t
x2apic_read(lapic_register_t reg)226 x2apic_read(lapic_register_t reg)
227 {
228 	uint32_t        lo;
229 	uint32_t        hi;
230 
231 	if (LVT_TIMER == reg) {
232 		// avoid frequent APIC access VM-exit
233 		return current_cpu_datap()->cpu_soft_apic_lvt_timer;
234 	}
235 	rdmsr(LAPIC_MSR(reg), lo, hi);
236 	return lo;
237 }
238 
239 static void
x2apic_write(lapic_register_t reg,uint32_t value)240 x2apic_write(lapic_register_t reg, uint32_t value)
241 {
242 	if (LVT_TIMER == reg) {
243 		current_cpu_datap()->cpu_soft_apic_lvt_timer = value;
244 	}
245 	wrmsr(LAPIC_MSR(reg), value, 0);
246 }
247 
/* Read the full 64-bit ICR in a single MSR access (x2APIC mode). */
static uint64_t
x2apic_read_icr(void)
{
	return rdmsr64(LAPIC_MSR(ICR));
}
253 
/*
 * Issue an ICR command: in x2APIC mode a single wrmsr carries both the
 * destination (high word) and command (low word).
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
259 
/* Operations vector used when the APIC runs in x2APIC (MSR) mode */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
267 
268 /*
269  * Used by APs to determine their APIC IDs; assumes master CPU has initialized
270  * the local APIC interfaces.
271  */
272 uint32_t
lapic_safe_apicid(void)273 lapic_safe_apicid(void)
274 {
275 	uint32_t        lo;
276 	uint32_t        hi;
277 	boolean_t       is_lapic_enabled, is_local_x2apic;
278 
279 	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
280 	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
281 	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
282 
283 	if (is_lapic_enabled && is_local_x2apic) {
284 		return x2apic_read(ID);
285 	} else if (is_lapic_enabled) {
286 		return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
287 	} else {
288 		panic("Unknown Local APIC state!");
289 		/*NORETURN*/
290 	}
291 }
292 
/*
 * (Re)initialize the local APIC so that its hardware mode matches the
 * OS-selected mode (is_x2apic).  Called at AP startup and at wake from
 * sleep; 'for_wake' enables the mode-consistency check against the mode
 * recorded before sleep.
 */
static void
lapic_reinit(bool for_wake)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;
	boolean_t       is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	/*
	 * If we're configured for x2apic mode and we're being asked to transition
	 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
	 * asked to transition to x2apic mode, call LAPIC_INIT().
	 */
	if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) {
		LAPIC_INIT();
		/* Now re-read after LAPIC_INIT() */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
		is_local_x2apic   = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	}

	if ((!is_lapic_enabled && !is_local_x2apic)) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If we did not select the same APIC mode as we had before sleep, flag
	 * that as an error (and panic on debug/development kernels).  Note that
	 * we might get here with for_wake == true for the first boot case.  In
	 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
	 * slept yet), so we do not need to do any APIC checks.
	 */
	if (for_wake &&
	    ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) ||
	    (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) {
		kprintf("Inconsistent APIC state after wake (was %d before sleep, "
		    "now is %d)", apic_mode_before_sleep,
		    is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC);
#if DEBUG || DEVELOPMENT
		kprintf("HALTING.\n");
		/*
		 * Unfortunately, we cannot safely panic here because the
		 * executing CPU might not be fully initialized.  The best
		 * we can do is just print a message to the console and
		 * halt.
		 */
		asm volatile ("cli; hlt;" ::: "memory");
#endif
	}

	if (is_local_x2apic) {
		/* ensure the soft copy is up-to-date */
		rdmsr(LAPIC_MSR(LVT_TIMER), lo, hi);
		current_cpu_datap()->cpu_soft_apic_lvt_timer = lo;
	}
}
355 
/*
 * Per-AP local APIC initialization.  Must not be called on the boot
 * processor (checked on DEBUG/DEVELOPMENT kernels).
 */
void
lapic_init_slave(void)
{
	lapic_reinit(false);
#if DEBUG || DEVELOPMENT
	if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) {
		panic("Calling lapic_init_slave() on the boot processor");
	}
#endif
}
366 
/*
 * Boot-processor local APIC initialization: examine the firmware-provided
 * state, decide between legacy xAPIC and x2APIC operation (boot-arg
 * "x2apic" can override), install the matching ops table, map the MMIO
 * window, and seed the lapic-id <-> cpu-number map with the BSP.
 */
void
lapic_init(void)
{
	uint32_t        lo;
	uint32_t        hi;
	boolean_t       is_boot_processor;
	boolean_t       is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic         = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overriden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/*
		 * If no x2apic boot-arg was set and if we're running under a VMM,
		 * autoenable x2APIC mode.
		 */
		if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE &&
		    cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) {
			is_x2apic = TRUE;
		}
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	if (lapic_pbase != 0) {
		/*
		 * APs might need to consult the local APIC via the MMIO interface
		 * to get their APIC IDs.
		 */
		map_local_apic();
	} else if (!is_x2apic) {
		panic("Local APIC physical address was not set.");
	}

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	/* Require an integrated APIC (version 0x14 or later) */
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map(lapic_safe_apicid(), 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
432 
433 
/*
 * Latch and return the Error Status Register.  ESR is a write-read
 * register: a write is required to update it before reading.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
441 
/* Clear the Error Status Register (two writes: latch, then clear). */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
448 
/* Decode table for the LVT delivery-mode field (used by lapic_dump()) */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* Decode table for the LVT timer-mode field (used by lapic_dump()) */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
466 
/*
 * Dump the full local APIC register state (LVT entries, priorities,
 * timer registers, and the TMR/IRR/ISR bitmaps) to the kernel log.
 * Helper macros below decode individual LVT fields for display.
 */
void
lapic_dump(void)
{
	int     i;

#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    lapic_safe_apicid(),
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; report 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI:    Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    &  LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	/* Column ruler for the 256-bit register dumps below */
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
569 
570 boolean_t
lapic_probe(void)571 lapic_probe(void)
572 {
573 	uint32_t        lo;
574 	uint32_t        hi;
575 
576 	if (cpuid_features() & CPUID_FEATURE_APIC) {
577 		return TRUE;
578 	}
579 
580 	if (cpuid_family() == 6 || cpuid_family() == 15) {
581 		/*
582 		 * Mobile Pentiums:
583 		 * There may be a local APIC which wasn't enabled by BIOS.
584 		 * So we try to enable it explicitly.
585 		 */
586 		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
587 		lo &= ~MSR_IA32_APIC_BASE_BASE;
588 		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
589 		lo |= MSR_IA32_APIC_BASE_ENABLE;
590 		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
591 
592 		/*
593 		 * Re-initialize cpu features info and re-check.
594 		 */
595 		cpuid_set_info();
596 		/* We expect this codepath will never be traversed
597 		 * due to EFI enabling the APIC. Reducing the APIC
598 		 * interrupt base dynamically is not supported.
599 		 */
600 		if (cpuid_features() & CPUID_FEATURE_APIC) {
601 			printf("Local APIC discovered and enabled\n");
602 			lapic_os_enabled = TRUE;
603 			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
604 			return TRUE;
605 		}
606 	}
607 
608 	return FALSE;
609 }
610 
/*
 * Disable the local APIC (only if the OS, not the BIOS, enabled it):
 * mask all LVT sources, software-disable via SVR, then clear the global
 * enable bit in MSR_IA32_APIC_BASE.  When shutting down for sleep, the
 * current APIC mode is recorded for the consistency check at wake
 * (see lapic_reinit()).
 */
void
lapic_shutdown(bool for_sleep)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	if (for_sleep == true) {
		apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC);
	}

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
656 
657 bool
ml_cpu_can_exit(int cpu_id)658 ml_cpu_can_exit(int cpu_id)
659 {
660 	return cpu_id > lapic_max_interrupt_cpunum;
661 }
662 
/*
 * Fully configure the local APIC for this CPU: reinitialize the hardware
 * mode, accept all priorities, software-enable via SVR, and program the
 * LVT entries (ExtINT only on interrupt-servicing CPUs; timer, perfmon,
 * thermal, CMCI, and error vectors).  Also parses the "lapic_dont_panic"
 * and "intcpumax" boot-args once on the boot cpu.
 */
void
lapic_configure(bool for_wake)
{
	int     value;

	/* One-time (boot cpu) setup of error-throttling parameters */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	if (cpu_number() == 0) {
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/*
	 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
	 * but firmware configured the Legacy APIC):
	 */
	lapic_reinit(for_wake);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/* Leave the error LVT masked on the master cpu if errors were
	 * previously throttled (see lapic_interrupt()'s ERROR case). */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
722 
723 void
lapic_set_timer(boolean_t interrupt_unmasked,lapic_timer_mode_t mode,lapic_timer_divide_t divisor,lapic_timer_count_t initial_count)724 lapic_set_timer(
725 	boolean_t               interrupt_unmasked,
726 	lapic_timer_mode_t      mode,
727 	lapic_timer_divide_t    divisor,
728 	lapic_timer_count_t     initial_count)
729 {
730 	uint32_t        timer_vector;
731 
732 	mp_disable_preemption();
733 	timer_vector = LAPIC_READ(LVT_TIMER);
734 	timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);
735 	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
736 	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
737 	LAPIC_WRITE(LVT_TIMER, timer_vector);
738 	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
739 	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
740 	mp_enable_preemption();
741 }
742 
743 void
lapic_config_timer(boolean_t interrupt_unmasked,lapic_timer_mode_t mode,lapic_timer_divide_t divisor)744 lapic_config_timer(
745 	boolean_t               interrupt_unmasked,
746 	lapic_timer_mode_t      mode,
747 	lapic_timer_divide_t    divisor)
748 {
749 	uint32_t        timer_vector;
750 
751 	mp_disable_preemption();
752 	timer_vector = LAPIC_READ(LVT_TIMER);
753 	timer_vector &= ~(LAPIC_LVT_MASKED |
754 	    LAPIC_LVT_PERIODIC |
755 	    LAPIC_LVT_TSC_DEADLINE);
756 	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
757 	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
758 	LAPIC_WRITE(LVT_TIMER, timer_vector);
759 	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
760 	mp_enable_preemption();
761 }
762 
763 /*
764  * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
765  */
/*
 * Switch the local APIC timer into TSC-deadline mode.  The lapic
 * interrupt is always unmasked in this mode.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t        timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG: keep arming a far-future deadline
	 * until the hardware reflects a non-zero value, then disarm. */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
788 
/*
 * Fast-path timer arm: unmask the timer LVT and write the initial count
 * (which starts the countdown).  Assumes mode/divider already configured.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t     initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
796 
/* Arm (or disarm, with 0) the TSC-deadline timer via its MSR. */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
803 
/* Return the currently-armed TSC deadline (0 if disarmed). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
809 
810 void
lapic_get_timer(lapic_timer_mode_t * mode,lapic_timer_divide_t * divisor,lapic_timer_count_t * initial_count,lapic_timer_count_t * current_count)811 lapic_get_timer(
812 	lapic_timer_mode_t      *mode,
813 	lapic_timer_divide_t    *divisor,
814 	lapic_timer_count_t     *initial_count,
815 	lapic_timer_count_t     *current_count)
816 {
817 	mp_disable_preemption();
818 	if (mode) {
819 		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
820 		    periodic : one_shot;
821 	}
822 	if (divisor) {
823 		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
824 	}
825 	if (initial_count) {
826 		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
827 	}
828 	if (current_count) {
829 		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
830 	}
831 	mp_enable_preemption();
832 }
833 
/* Signal end-of-interrupt to the local APIC by writing the EOI register. */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
839 
/* Exported wrapper for the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
845 
/* Re-arm the performance-counter LVT (writing the vector with no mask bit). */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
851 
852 void
lapic_set_perfcnt_interrupt_mask(boolean_t mask)853 lapic_set_perfcnt_interrupt_mask(boolean_t mask)
854 {
855 	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
856 	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
857 }
858 
/*
 * Register 'func' as the handler for a local APIC interrupt.  Accepts
 * either an absolute vector (above lapic_interrupt_base) or a
 * LAPIC-relative interrupt number; panics on an unrecognized vector.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	/* Convert an absolute vector to a LAPIC-relative interrupt number */
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector",
		    vector, func);
	}
}
881 
/* Convenience wrapper: register the performance-monitor interrupt handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
887 
/*
 * Dispatch a local APIC interrupt to its registered handler.
 * 'interrupt_num' is an absolute vector number; it is converted to a
 * LAPIC-relative number first.  Returns 1 if the interrupt was handled
 * here (EOI issued as needed), 0 if it should be passed up the chain
 * (e.g. to the platform expert).
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int     retval = 0;
	int     esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the LAPIC range: only the NMI pseudo-vector is ours */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				/* Outside the window: restart the count */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
		/* No handler; returns 0 (unhandled) */
	}
	break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
998 
/*
 * Recover from a buggy SMM handler that left a timer interrupt in
 * service: if the timer's ISR bit is set, issue the missing EOI and, if
 * the one-shot count already expired, re-arm a minimal countdown so the
 * OS still receives a timer interrupt.  No-op unless the OS enabled the
 * APIC itself.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE) {
		return;
	}

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
1032 
1033 void
lapic_send_ipi(int cpu,int vector)1034 lapic_send_ipi(int cpu, int vector)
1035 {
1036 	boolean_t       state;
1037 
1038 	if (vector < lapic_interrupt_base) {
1039 		vector += lapic_interrupt_base;
1040 	}
1041 
1042 	state = ml_set_interrupts_enabled(FALSE);
1043 
1044 	/* X2APIC's ICR doesn't have a pending bit. */
1045 	if (!is_x2apic) {
1046 		/* Wait for pending outgoing send to complete */
1047 		while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
1048 			cpu_pause();
1049 		}
1050 	}
1051 
1052 	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
1053 
1054 	(void) ml_set_interrupts_enabled(state);
1055 }
1056 
/*
 * Send an NMI IPI to the local APIC of the given cpu. Best-effort:
 * after a bounded wait for the ICR pending bit, the NMI is issued
 * regardless, since callers (debugger/panic paths) must make progress.
 */
void
lapic_send_nmi(int cpu)
{
	if (!is_x2apic) {
		if (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
			uint64_t now = mach_absolute_time();
			/* Wait up to 10ms for the pending outgoing send (if any) to complete */
			/* NOTE(review): comparison assumes mach_absolute_time units
			 * are ~nanoseconds (1:1 timebase on Intel) — confirm. */
			while ((LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) &&
			    (mach_absolute_time() - now) < (10 * NSEC_PER_MSEC)) {
				cpu_pause();
			}
		}
#if DEVELOPMENT || DEBUG
		if (__improbable(LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING)) {
			/* Since it's not safe to invoke printf here, kprintf and counting is the best we can do */
			kprintf("WARNING: Wait for lapic ICR pending bit timed-out!\n");
			atomic_incl((volatile long *)&lapic_icr_pending_timeouts, 1);
		}
#endif
	}

	/* Program the interrupt command register */
	/* The vector is ignored in this case--the target CPU will enter on the
	 * NMI vector.
	 */
	LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
	    LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI);
}
1085 
1086 /*
1087  * The following interfaces are privately exported to AICPM.
1088  */
1089 
1090 boolean_t
lapic_is_interrupt_pending(void)1091 lapic_is_interrupt_pending(void)
1092 {
1093 	int             i;
1094 
1095 	for (i = 0; i < 8; i += 1) {
1096 		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
1097 		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
1098 			return TRUE;
1099 		}
1100 	}
1101 
1102 	return FALSE;
1103 }
1104 
1105 boolean_t
lapic_is_interrupting(uint8_t vector)1106 lapic_is_interrupting(uint8_t vector)
1107 {
1108 	int             i;
1109 	int             bit;
1110 	uint32_t        irr;
1111 	uint32_t        isr;
1112 
1113 	i = vector / 32;
1114 	bit = 1 << (vector % 32);
1115 
1116 	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1117 	isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1118 
1119 	if ((irr | isr) & bit) {
1120 		return TRUE;
1121 	}
1122 
1123 	return FALSE;
1124 }
1125 
1126 void
lapic_interrupt_counts(uint64_t intrs[256])1127 lapic_interrupt_counts(uint64_t intrs[256])
1128 {
1129 	int             i;
1130 	int             j;
1131 	int             bit;
1132 	uint32_t        irr;
1133 	uint32_t        isr;
1134 
1135 	if (intrs == NULL) {
1136 		return;
1137 	}
1138 
1139 	for (i = 0; i < 8; i += 1) {
1140 		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1141 		isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1142 
1143 		if ((isr | irr) == 0) {
1144 			continue;
1145 		}
1146 
1147 		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
1148 			bit = (32 * i) + j;
1149 			if ((isr | irr) & (1 << j)) {
1150 				intrs[bit] += 1;
1151 			}
1152 		}
1153 	}
1154 }
1155 
/*
 * Disarm the local APIC timer on the current CPU, in whichever mode
 * (TSC-deadline or one-shot/periodic countdown) it is operating.
 */
void
lapic_disable_timer(void)
{
	uint32_t        lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		/* Writing 0 to IA32_TSC_DEADLINE disarms the timer. */
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/*
		 * Re-read LVT_TIMER; the value is unused, so presumably this
		 * forces the preceding writes to post — TODO confirm intent.
		 */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
1175 
/* SPI returning the CMCI vector */
uint8_t
lapic_get_cmci_vector(void)
{
#if CONFIG_MCA
	/* Report the CMCI vector only when the hardware supports CMCI. */
	if (mca_is_cmci_present()) {
		return LAPIC_VECTOR(CMCI);
	}
#endif
	/* No CMCI support (or MCA not configured): report vector 0. */
	return 0;
}
1189 
#if DEVELOPMENT || DEBUG
extern void lapic_trigger_MC(void);
/*
 * Debug-only: deliberately provoke a machine check by performing an
 * illegal-width (64-bit) read of a local APIC MMIO register.
 * The volatile qualifiers keep the access from being optimized away.
 */
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	dummy++;
}
#endif
1200