xref: /xnu-10002.61.3/osfmk/i386/machine_check.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/zalloc.h>
30 #include <mach/mach_time.h>
31 #include <i386/cpu_data.h>
32 #include <i386/cpuid.h>
33 #include <i386/cpu_topology.h>
34 #include <i386/cpu_threads.h>
35 #include <i386/lapic.h>
36 #include <i386/machine_cpu.h>
37 #include <i386/machine_check.h>
38 #include <i386/proc_reg.h>
39 
40 /*
41  * At the time of the machine-check exception, all hardware-threads panic.
42  * Each thread saves the state of its MCA registers to its per-cpu data area.
43  *
44  * State reporting is serialized so one thread dumps all valid state for all
45  * threads to the panic log. This may entail spinning waiting for other
46  * threads to complete saving state to memory. A timeout applies to this wait
47  * -- in particular, a 3-strikes timeout may prevent a thread from taking
48  * part is the affair.
49  */
50 
51 #define IF(bool, str)    ((bool) ? (str) : "")
52 
53 static boolean_t        mca_initialized = FALSE;
54 static boolean_t        mca_MCE_present = FALSE;
55 static boolean_t        mca_MCA_present = FALSE;
56 static uint32_t         mca_family = 0;
57 static unsigned int     mca_error_bank_count = 0;
58 static boolean_t        mca_control_MSR_present = FALSE;
59 static boolean_t        mca_cmci_present = FALSE;
60 static ia32_mcg_cap_t   ia32_mcg_cap;
61 decl_simple_lock_data(static, mca_lock);
62 
63 typedef struct {
64 	ia32_mci_ctl_t          mca_mci_ctl;
65 	ia32_mci_status_t       mca_mci_status;
66 	ia32_mci_misc_t         mca_mci_misc;
67 	ia32_mci_addr_t         mca_mci_addr;
68 } mca_mci_bank_t;
69 
70 typedef struct mca_state {
71 	boolean_t               mca_is_saved;
72 	boolean_t               mca_is_valid;   /* some state is valid */
73 	ia32_mcg_ctl_t          mca_mcg_ctl;
74 	ia32_mcg_status_t       mca_mcg_status;
75 	mca_mci_bank_t          mca_error_bank[0];
76 } mca_state_t;
77 
78 typedef enum {
79 	CLEAR,
80 	DUMPING,
81 	DUMPED
82 } mca_dump_state_t;
83 static volatile mca_dump_state_t mca_dump_state = CLEAR;
84 
85 static void
mca_get_availability(void)86 mca_get_availability(void)
87 {
88 	uint64_t        features = cpuid_info()->cpuid_features;
89 	uint32_t        family =   cpuid_info()->cpuid_family;
90 	uint32_t        model =    cpuid_info()->cpuid_model;
91 	uint32_t        stepping = cpuid_info()->cpuid_stepping;
92 
93 	if ((model == CPUID_MODEL_HASWELL && stepping < 3) ||
94 	    (model == CPUID_MODEL_HASWELL_ULT && stepping < 1) ||
95 	    (model == CPUID_MODEL_CRYSTALWELL && stepping < 1)) {
96 		panic("Haswell pre-C0 steppings are not supported");
97 	}
98 
99 	mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
100 	mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
101 	mca_family = family;
102 
103 	/*
104 	 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
105 	 */
106 	if (mca_MCA_present) {
107 		ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
108 		mca_error_bank_count = ia32_mcg_cap.bits.count;
109 		mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
110 		mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
111 	}
112 }
113 
114 void
mca_cpu_init(void)115 mca_cpu_init(void)
116 {
117 	unsigned int    i;
118 
119 	/*
120 	 * The first (boot) processor is responsible for discovering the
121 	 * machine check architecture present on this machine.
122 	 */
123 	if (!mca_initialized) {
124 		mca_get_availability();
125 		mca_initialized = TRUE;
126 		simple_lock_init(&mca_lock, 0);
127 	}
128 
129 	if (mca_MCA_present) {
130 		/* Enable all MCA features */
131 		if (mca_control_MSR_present) {
132 			wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);
133 		}
134 
135 		switch (mca_family) {
136 		case 0x06:
137 			/* Enable all but mc0 */
138 			for (i = 1; i < mca_error_bank_count; i++) {
139 				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);
140 			}
141 
142 			/* Clear all errors */
143 			for (i = 0; i < mca_error_bank_count; i++) {
144 				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
145 			}
146 			break;
147 		case 0x0F:
148 			/* Enable all banks */
149 			for (i = 0; i < mca_error_bank_count; i++) {
150 				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);
151 			}
152 
153 			/* Clear all errors */
154 			for (i = 0; i < mca_error_bank_count; i++) {
155 				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
156 			}
157 			break;
158 		}
159 	}
160 
161 	/* Enable machine check exception handling if available */
162 	if (mca_MCE_present) {
163 		set_cr4(get_cr4() | CR4_MCE);
164 	}
165 }
166 
167 boolean_t
mca_is_cmci_present(void)168 mca_is_cmci_present(void)
169 {
170 	if (!mca_initialized) {
171 		mca_cpu_init();
172 	}
173 	return mca_cmci_present;
174 }
175 
176 void
mca_cpu_alloc(cpu_data_t * cdp)177 mca_cpu_alloc(cpu_data_t        *cdp)
178 {
179 	vm_size_t       mca_state_size;
180 
181 	/*
182 	 * Allocate space for an array of error banks.
183 	 */
184 	mca_state_size = sizeof(mca_state_t) +
185 	    sizeof(mca_mci_bank_t) * mca_error_bank_count;
186 	cdp->cpu_mca_state = zalloc_permanent_tag(mca_state_size, ZALIGN_PTR,
187 	    VM_KERN_MEMORY_CPU);
188 	if (cdp->cpu_mca_state == NULL) {
189 		printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number);
190 		return;
191 	}
192 
193 	/*
194 	 * If the boot processor is yet have its allocation made,
195 	 * do this now.
196 	 */
197 	if (cpu_datap(master_cpu)->cpu_mca_state == NULL) {
198 		mca_cpu_alloc(cpu_datap(master_cpu));
199 	}
200 }
201 
202 static void
mca_save_state(mca_state_t * mca_state)203 mca_save_state(mca_state_t *mca_state)
204 {
205 	mca_mci_bank_t  *bank;
206 	unsigned int    i;
207 
208 	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);
209 
210 	if (mca_state == NULL) {
211 		return;
212 	}
213 
214 	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
215 	    rdmsr64(IA32_MCG_CTL) : 0ULL;
216 	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);
217 
218 	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
219 	for (i = 0; i < mca_error_bank_count; i++, bank++) {
220 		bank->mca_mci_ctl        = rdmsr64(IA32_MCi_CTL(i));
221 		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));
222 		if (!bank->mca_mci_status.bits.val) {
223 			continue;
224 		}
225 		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv)?
226 		    rdmsr64(IA32_MCi_MISC(i)) : 0ULL;
227 		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv)?
228 		    rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;
229 		mca_state->mca_is_valid = TRUE;
230 	}
231 
232 	/*
233 	 * If we're the first thread with MCA state, point our package to it
234 	 * and don't care about races
235 	 */
236 	if (x86_package()->mca_state == NULL) {
237 		x86_package()->mca_state = mca_state;
238 	}
239 
240 	mca_state->mca_is_saved = TRUE;
241 }
242 
243 void
mca_check_save(void)244 mca_check_save(void)
245 {
246 	if (mca_dump_state > CLEAR) {
247 		mca_save_state(current_cpu_datap()->cpu_mca_state);
248 	}
249 }
250 
251 static void
mca_report_cpu_info(void)252 mca_report_cpu_info(void)
253 {
254 	i386_cpu_info_t *infop = cpuid_info();
255 
256 	paniclog_append_noflush(" family: %d model: %d stepping: %d microcode: %d\n",
257 	    infop->cpuid_family,
258 	    infop->cpuid_model,
259 	    infop->cpuid_stepping,
260 	    infop->cpuid_microcode_version);
261 	paniclog_append_noflush(" signature: 0x%x\n",
262 	    infop->cpuid_signature);
263 	paniclog_append_noflush(" %s\n",
264 	    infop->cpuid_brand_string);
265 }
266 
267 static void
mca_dump_bank(mca_state_t * state,int i)268 mca_dump_bank(mca_state_t *state, int i)
269 {
270 	mca_mci_bank_t          *bank;
271 	ia32_mci_status_t       status;
272 
273 	bank = &state->mca_error_bank[i];
274 	status = bank->mca_mci_status;
275 	if (!status.bits.val) {
276 		return;
277 	}
278 
279 	paniclog_append_noflush(" IA32_MC%d_STATUS(0x%x): 0x%016qx\n",
280 	    i, IA32_MCi_STATUS(i), status.u64);
281 
282 	if (status.bits.addrv) {
283 		paniclog_append_noflush(" IA32_MC%d_ADDR(0x%x):   0x%016qx\n",
284 		    i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
285 	}
286 
287 	if (status.bits.miscv) {
288 		paniclog_append_noflush(" IA32_MC%d_MISC(0x%x):   0x%016qx\n",
289 		    i, IA32_MCi_MISC(i), bank->mca_mci_misc);
290 	}
291 }
292 
293 static void
mca_cpu_dump_error_banks(mca_state_t * state)294 mca_cpu_dump_error_banks(mca_state_t *state)
295 {
296 	unsigned int            i;
297 
298 	if (!state->mca_is_valid) {
299 		return;
300 	}
301 
302 	for (i = 0; i < mca_error_bank_count; i++) {
303 		mca_dump_bank(state, i);
304 	}
305 }
306 
307 void
mca_dump(void)308 mca_dump(void)
309 {
310 	mca_state_t     *mca_state = current_cpu_datap()->cpu_mca_state;
311 	uint64_t        deadline;
312 	unsigned int    i = 0;
313 
314 	/*
315 	 * Capture local MCA registers to per-cpu data.
316 	 */
317 	mca_save_state(mca_state);
318 
319 	/*
320 	 * Serialize: the first caller controls dumping MCA registers,
321 	 * other threads spin meantime.
322 	 */
323 	simple_lock(&mca_lock, LCK_GRP_NULL);
324 	if (mca_dump_state > CLEAR) {
325 		simple_unlock(&mca_lock);
326 		while (mca_dump_state == DUMPING) {
327 			cpu_pause();
328 		}
329 		return;
330 	}
331 	mca_dump_state = DUMPING;
332 	simple_unlock(&mca_lock);
333 
334 	/*
335 	 * Wait for all other hardware threads to save their state.
336 	 * Or timeout.
337 	 */
338 	deadline = mach_absolute_time() + LockTimeOut;
339 	while (mach_absolute_time() < deadline && i < real_ncpus) {
340 		if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
341 			cpu_pause();
342 			continue;
343 		}
344 		i += 1;
345 	}
346 
347 	/*
348 	 * Report machine-check capabilities:
349 	 */
350 	paniclog_append_noflush("Machine-check capabilities: 0x%016qx\n", ia32_mcg_cap.u64);
351 
352 	mca_report_cpu_info();
353 
354 	paniclog_append_noflush(" %d error-reporting banks\n", mca_error_bank_count);
355 
356 	/*
357 	 * Dump all processor state:
358 	 */
359 	for (i = 0; i < real_ncpus; i++) {
360 		mca_state_t             *mcsp = cpu_datap(i)->cpu_mca_state;
361 		ia32_mcg_status_t       status;
362 
363 		if (mcsp == NULL ||
364 		    mcsp->mca_is_saved == FALSE ||
365 		    mcsp->mca_mcg_status.u64 == 0 ||
366 		    !mcsp->mca_is_valid) {
367 			continue;
368 		}
369 		status = mcsp->mca_mcg_status;
370 		paniclog_append_noflush("Processor %d: IA32_MCG_STATUS: 0x%016qx\n",
371 		    i, status.u64);
372 		mca_cpu_dump_error_banks(mcsp);
373 	}
374 
375 	/* Update state to release any other threads. */
376 	mca_dump_state = DUMPED;
377 }
378 
379 
380 #if DEVELOPMENT || DEBUG
381 extern void mca_exception_panic(void);
382 extern void lapic_trigger_MC(void);
383 void
mca_exception_panic(void)384 mca_exception_panic(void)
385 {
386 	lapic_trigger_MC();
387 }
388 #endif
389