xref: /xnu-11215.41.3/osfmk/i386/Diagnostics.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2005-2008 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * @APPLE_FREE_COPYRIGHT@
33  */
34 
35 /*
36  *	Author: Bill Angell, Apple
37  *	Date:	10/aught-five
38  *
39  *	Random diagnostics, augmented Derek Kumar 2011
40  *
41  *
42  */
43 
44 
45 #include <kern/machine.h>
46 #include <kern/processor.h>
47 #include <mach/machine.h>
48 #include <mach/processor_info.h>
49 #include <mach/mach_types.h>
50 #include <mach/boolean.h>
51 #include <kern/thread.h>
52 #include <kern/task.h>
53 #include <kern/ipc_kobject.h>
54 #include <kern/monotonic.h>
55 #include <mach/vm_param.h>
56 #include <ipc/port.h>
57 #include <ipc/ipc_entry.h>
58 #include <ipc/ipc_space.h>
59 #include <ipc/ipc_object.h>
60 #include <ipc/ipc_port.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_page.h>
64 #include <vm/pmap.h>
65 #include <pexpert/pexpert.h>
66 #include <console/video_console.h>
67 #include <i386/cpu_data.h>
68 #include <i386/Diagnostics.h>
69 #include <i386/mp.h>
70 #include <i386/pmCPU.h>
71 #include <i386/tsc.h>
72 #include <mach/i386/syscall_sw.h>
73 #include <kern/kalloc.h>
74 #include <sys/kdebug.h>
75 #include <i386/machine_cpu.h>
76 #include <i386/misc_protos.h>
77 #include <i386/cpuid.h>
78 
/* Gate for the pmap permissions audit; apparently unreferenced in this file
 * (dgPermCheck below does not consult it) — NOTE(review): confirm before removal. */
#define PERMIT_PERMCHECK (0)

diagWork        dgWork;                         /* Global diagnostics work area; dgFlags gates privileged selectors */
uint64_t        lastRuptClear = 0ULL;           /* mach_absolute_time() of the last dgRuptStat counter clear */
boolean_t       diag_pmc_enabled = FALSE;       /* Set TRUE by dgEnaPMC once fixed-function PMCs are enabled */
void cpu_powerstats(void *);                    /* Per-CPU stats collector, run on each CPU via mp_cpus_call() */
85 
/*
 * Per-core energy/performance statistics record, copied out to user space
 * by the dgPowerStat diagnostic — one per CPU, appended after the
 * pkg_energy_statistics_t header.  Layout is part of the user-visible
 * diagnostics ABI; do not reorder fields.
 */
typedef struct {
	uint64_t caperf;                        /* MSR_IA32_APERF snapshot (cpu_aperf) */
	uint64_t cmperf;                        /* MSR_IA32_MPERF snapshot (cpu_mperf) */
	uint64_t ccres[6];                      /* Core C-state residencies: [0]=C3, [1]=C6, [2]=C7; rest unpopulated */
	uint64_t crtimes[CPU_RTIME_BINS];       /* Copy of cpu_data cpu_rtimes[] (run-time bins) */
	uint64_t citimes[CPU_ITIME_BINS];       /* Copy of cpu_data cpu_itimes[] (idle-time bins) */
	uint64_t crtime_total;                  /* Accumulated run time (cpu_rtime_total) */
	uint64_t citime_total;                  /* Accumulated idle time (cpu_itime_total) */
	uint64_t cpu_idle_exits;                /* Count of idle exits for this CPU */
	uint64_t cpu_insns;                     /* Fixed PMC: instructions retired */
	uint64_t cpu_ucc;                       /* Fixed PMC: unhalted core cycles */
	uint64_t cpu_urc;                       /* Fixed PMC: unhalted reference cycles */
#if     DIAG_ALL_PMCS
	uint64_t gpmcs[4];                      /* General-purpose PMC values */
#endif /* DIAG_ALL_PMCS */
} core_energy_stat_t;
102 
/*
 * Package-level energy statistics header copied out to user space by the
 * dgPowerStat diagnostic; immediately followed in the user buffer by
 * ncpus core_energy_stat_t records (the flexible cest[] member).
 * Fields left at ~0ULL indicate an MSR that could not be read.
 */
typedef struct {
	uint64_t pkes_version;                  /* Layout version; dgPowerStat sets 1 */
	uint64_t pkg_cres[2][7];                /* Package C-state residencies: [0][0..6] = C2,C3,C6,C7,C8,C9,C10 */
	uint64_t pkg_power_unit;                /* MSR_IA32_PKG_POWER_SKU_UNIT */
	uint64_t pkg_energy;                    /* MSR_IA32_PKG_ENERGY_STATUS */
	uint64_t pp0_energy;                    /* MSR_IA32_PP0_ENERGY_STATUS */
	uint64_t pp1_energy;                    /* MSR_IA32_PP1_ENERGY_STATUS */
	uint64_t ddr_energy;                    /* MSR_IA32_DDR_ENERGY_STATUS */
	uint64_t llc_flushed_cycles;            /* MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER */
	uint64_t ring_ratio_instantaneous;      /* MSR_IA32_RING_PERF_STATUS */
	uint64_t IA_frequency_clipping_cause;   /* IA perf-limit-reasons MSR (number is CPU-family dependent) */
	uint64_t GT_frequency_clipping_cause;   /* MSR_IA32_GT_PERF_LIMIT_REASONS */
	uint64_t pkg_idle_exits;                /* Package idle-exit count */
	uint64_t pkg_rtimes[CPU_RTIME_BINS];    /* Not populated by dgPowerStat in this file */
	uint64_t pkg_itimes[CPU_ITIME_BINS];    /* Not populated by dgPowerStat in this file */
	uint64_t mbus_delay_time;               /* Not populated by dgPowerStat in this file */
	uint64_t mint_delay_time;               /* Not populated by dgPowerStat in this file */
	uint32_t ncpus;                         /* Number of core_energy_stat_t records that follow */
	core_energy_stat_t cest[];              /* Flexible array of per-core stats */
} pkg_energy_statistics_t;
123 
124 
/*
 * diagCall64: dispatcher for the x86_64 diagnostics system call.
 *
 * The selector identifying the requested diagnostic arrives in %rdi
 * (regs->rdi); the selector-specific argument — typically a user-space
 * buffer address — arrives in %rsi.  The result is stored into %rax and
 * also returned: 1 on success, 0 for an unrecognized selector (the
 * caller turns 0 into an exception).
 *
 * Interrupts are expected to be disabled on entry; each case re-enables
 * them only around work that may block (e.g. copyout) and disables them
 * again before returning — the assert() at the bottom enforces this.
 */
int
diagCall64(x86_saved_state_t * state)
{
	uint64_t        curpos, i, j;
	uint64_t        selector, data;
	uint64_t        currNap, durNap;
	x86_saved_state64_t     *regs;
	boolean_t       diagflag;
	uint32_t        rval = 0;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	/* enaDiagSCs gates the destructive selectors; only the
	 * DEVELOPMENT || DEBUG cases below consult diagflag. */
	diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0);
	selector = regs->rdi;

	switch (selector) {     /* Select the routine */
	case dgRuptStat:        /* Suck Interruption statistics */
		(void) ml_set_interrupts_enabled(TRUE);
		data = regs->rsi; /* User buffer address for the stats;
		                   * 0 means "just clear the counters" */

		if (data == 0) { /* If no location is specified for data, clear all
			          * counts
			          */
			for (i = 0; i < real_ncpus; i++) {      /* Cycle through
				                                 * processors */
				for (j = 0; j < 256; j++) {
					cpu_data_ptr[i]->cpu_hwIntCnt[j] = 0;
				}
			}

			lastRuptClear = mach_absolute_time();   /* Get the time of clear */
			rval = 1;       /* Normal return */
			(void) ml_set_interrupts_enabled(FALSE);
			break;
		}

		/* Output layout: uint32 ncpus, then per CPU a uint64 interval
		 * followed by 256 uint32 interrupt counts.  copyout failures
		 * are deliberately ignored (best-effort diagnostic). */
		(void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus)); /* Copy out number of
		                                                                 * processors */
		currNap = mach_absolute_time(); /* Get the time now */
		durNap = currNap - lastRuptClear;       /* Get the last interval
		                                         * duration */
		if (durNap == 0) {
			durNap = 1;     /* This is a very short time, make it
			                 * bigger */
		}

		curpos = data + sizeof(real_ncpus);     /* Point to the next
		                                         * available spot */

		for (i = 0; i < real_ncpus; i++) {      /* Move 'em all out */
			(void) copyout((char *) &durNap, curpos, 8);    /* Copy out the time
			                                                 * since last clear */
			(void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t));    /* Copy out interrupt
			                                                                                                 * data for this
			                                                                                                 * processor */
			curpos = curpos + (256 * sizeof(uint32_t) + 8); /* Point to next out put
			                                                 * slot */
		}
		rval = 1;
		(void) ml_set_interrupts_enabled(FALSE);
		break;

	case dgPowerStat:
	/* Gather package-level C-state residency and RAPL energy MSRs plus
	 * per-core statistics, and copy them to the user buffer in %rsi as a
	 * pkg_energy_statistics_t followed by ncpus core_energy_stat_t. */
	{
		uint32_t c2l = 0, c2h = 0, c3l = 0, c3h = 0, c6l = 0, c6h = 0, c7l = 0, c7h = 0;
		uint32_t pkg_unit_l = 0, pkg_unit_h = 0, pkg_ecl = 0, pkg_ech = 0;

		pkg_energy_statistics_t pkes;
		core_energy_stat_t cest;

		bzero(&pkes, sizeof(pkes));
		bzero(&cest, sizeof(cest));

		pkes.pkes_version = 1ULL;
		rdmsr_carefully(MSR_IA32_PKG_C2_RESIDENCY, &c2l, &c2h);
		rdmsr_carefully(MSR_IA32_PKG_C3_RESIDENCY, &c3l, &c3h);
		rdmsr_carefully(MSR_IA32_PKG_C6_RESIDENCY, &c6l, &c6h);
		rdmsr_carefully(MSR_IA32_PKG_C7_RESIDENCY, &c7l, &c7h);

		pkes.pkg_cres[0][0] = ((uint64_t)c2h << 32) | c2l;
		pkes.pkg_cres[0][1] = ((uint64_t)c3h << 32) | c3l;
		pkes.pkg_cres[0][2] = ((uint64_t)c6h << 32) | c6l;
		pkes.pkg_cres[0][3] = ((uint64_t)c7h << 32) | c7l;

		/* Pre-set to all-ones so an MSR that cannot be read shows up as
		 * ~0 in the output (rdmsr64_carefully presumably leaves the
		 * value untouched on a fault — NOTE(review): verify). */
		uint64_t c8r = ~0ULL, c9r = ~0ULL, c10r = ~0ULL;

		rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r);
		rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r);
		rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r);

		pkes.pkg_cres[0][4] = c8r;
		pkes.pkg_cres[0][5] = c9r;
		pkes.pkg_cres[0][6] = c10r;

		pkes.ddr_energy = ~0ULL;
		rdmsr64_carefully(MSR_IA32_DDR_ENERGY_STATUS, &pkes.ddr_energy);
		pkes.llc_flushed_cycles = ~0ULL;
		rdmsr64_carefully(MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER, &pkes.llc_flushed_cycles);

		pkes.ring_ratio_instantaneous = ~0ULL;
		rdmsr64_carefully(MSR_IA32_RING_PERF_STATUS, &pkes.ring_ratio_instantaneous);

		pkes.IA_frequency_clipping_cause = ~0ULL;

		uint32_t ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS;
		/* Should perhaps be a generic register map module for these
		 * registers with identical functionality that were renumbered.
		 */
		switch (cpuid_cpufamily()) {
		case CPUFAMILY_INTEL_SKYLAKE:
		case CPUFAMILY_INTEL_KABYLAKE:
		case CPUFAMILY_INTEL_ICELAKE:
		case CPUFAMILY_INTEL_COMETLAKE:
			ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS_SKL;
			break;
		default:
			break;
		}

		rdmsr64_carefully(ia_perf_limits, &pkes.IA_frequency_clipping_cause);

		pkes.GT_frequency_clipping_cause = ~0ULL;
		rdmsr64_carefully(MSR_IA32_GT_PERF_LIMIT_REASONS, &pkes.GT_frequency_clipping_cause);

		rdmsr_carefully(MSR_IA32_PKG_POWER_SKU_UNIT, &pkg_unit_l, &pkg_unit_h);
		rdmsr_carefully(MSR_IA32_PKG_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pkg_power_unit = ((uint64_t)pkg_unit_h << 32) | pkg_unit_l;
		pkes.pkg_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP0_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp0_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP1_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp1_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		pkes.pkg_idle_exits = current_cpu_datap()->lcpu.package->package_idle_exits;
		pkes.ncpus = real_ncpus;

		/* Enable interrupts: copyout may fault/block. */
		(void) ml_set_interrupts_enabled(TRUE);

		copyout(&pkes, regs->rsi, sizeof(pkes));
		curpos = regs->rsi + sizeof(pkes);

		/* Have every CPU refresh its per-core counters in parallel. */
		mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL);

		for (i = 0; i < real_ncpus; i++) {
			/* Snapshot each CPU's stats with interrupts off, then
			 * re-enable for the (possibly blocking) copyout. */
			(void) ml_set_interrupts_enabled(FALSE);

			cest.caperf = cpu_data_ptr[i]->cpu_aperf;
			cest.cmperf = cpu_data_ptr[i]->cpu_mperf;
			cest.ccres[0] = cpu_data_ptr[i]->cpu_c3res;
			cest.ccres[1] = cpu_data_ptr[i]->cpu_c6res;
			cest.ccres[2] = cpu_data_ptr[i]->cpu_c7res;

			bcopy(&cpu_data_ptr[i]->cpu_rtimes[0], &cest.crtimes[0], sizeof(cest.crtimes));
			bcopy(&cpu_data_ptr[i]->cpu_itimes[0], &cest.citimes[0], sizeof(cest.citimes));

			cest.citime_total = cpu_data_ptr[i]->cpu_itime_total;
			cest.crtime_total = cpu_data_ptr[i]->cpu_rtime_total;
			cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits;
#if CONFIG_CPU_COUNTERS
			cest.cpu_insns = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_INSTRS];
			cest.cpu_ucc = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_CYCLES];
			cest.cpu_urc = cpu_data_ptr[i]->cpu_monotonic.mtc_counts[MT_CORE_REFCYCLES];
#endif /* CONFIG_CPU_COUNTERS */
#if DIAG_ALL_PMCS
			bcopy(&cpu_data_ptr[i]->cpu_gpmcs[0], &cest.gpmcs[0], sizeof(cest.gpmcs));
#endif /* DIAG_ALL_PMCS */
			(void) ml_set_interrupts_enabled(TRUE);

			copyout(&cest, curpos, sizeof(cest));
			curpos += sizeof(cest);
		}
		rval = 1;
		(void) ml_set_interrupts_enabled(FALSE);
	}
	break;
	case dgEnaPMC:
	/* Enable the fixed-function performance counters on every CPU. */
	{
		boolean_t enable = TRUE;
		uint32_t cpuinfo[4];
		/* Require architectural PMC v2 or higher, corresponding to
		 * Merom+, or equivalent virtualised facility.
		 */
		do_cpuid(0xA, &cpuinfo[0]);
		if ((cpuinfo[0] & 0xFF) >= 2) { /* CPUID.0xA EAX[7:0] = PMC architecture version */
			mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable);
			diag_pmc_enabled = TRUE;
		}
		rval = 1;
	}
	break;
#if     DEVELOPMENT || DEBUG
	case dgGzallocTest:
	{
		(void) ml_set_interrupts_enabled(TRUE);
		if (diagflag) {
			unsigned *ptr = (unsigned *)kalloc_data(1024, Z_WAITOK);
			kfree_data(ptr, 1024);
			/* Deliberate write-after-free: with the guard-mode
			 * allocator (gzalloc) armed this store is expected to
			 * trap — that is the point of the test. */
			*ptr = 0x42;
		}
		(void) ml_set_interrupts_enabled(FALSE);
	}
	break;
#endif

#if DEVELOPMENT || DEBUG
	case    dgPermCheck:
	/* Audit kernel pmap/VM-map permission consistency. */
	{
		(void) ml_set_interrupts_enabled(TRUE);
		if (diagflag) {
			rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL);
		}
		(void) ml_set_interrupts_enabled(FALSE);
	}
	break;
#endif /* DEVELOPMENT || DEBUG */
	default:                /* Handle invalid ones */
		rval = 0;       /* Return an exception */
	}

	regs->rax = rval;

	assert(ml_get_interrupts_enabled() == FALSE);
	return rval;
}
351 
352 void
cpu_powerstats(__unused void * arg)353 cpu_powerstats(__unused void *arg)
354 {
355 	cpu_data_t *cdp = current_cpu_datap();
356 	__unused int cnum = cdp->cpu_number;
357 	uint32_t cl = 0, ch = 0, mpl = 0, mph = 0, apl = 0, aph = 0;
358 
359 	rdmsr_carefully(MSR_IA32_MPERF, &mpl, &mph);
360 	rdmsr_carefully(MSR_IA32_APERF, &apl, &aph);
361 
362 	cdp->cpu_mperf = ((uint64_t)mph << 32) | mpl;
363 	cdp->cpu_aperf = ((uint64_t)aph << 32) | apl;
364 
365 	uint64_t ctime = mach_absolute_time();
366 	cdp->cpu_rtime_total += ctime - cdp->cpu_ixtime;
367 	cdp->cpu_ixtime = ctime;
368 
369 	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
370 	cdp->cpu_c3res = ((uint64_t)ch << 32) | cl;
371 
372 	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
373 	cdp->cpu_c6res = ((uint64_t)ch << 32) | cl;
374 
375 	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
376 	cdp->cpu_c7res = ((uint64_t)ch << 32) | cl;
377 
378 	if (diag_pmc_enabled) {
379 #if CONFIG_CPU_COUNTERS
380 		mt_update_fixed_counts();
381 #else /* CONFIG_CPU_COUNTERS */
382 		uint64_t insns = read_pmc(FIXED_PMC0);
383 		uint64_t ucc = read_pmc(FIXED_PMC1);
384 		uint64_t urc = read_pmc(FIXED_PMC2);
385 #endif /* !CONFIG_CPU_COUNTERS */
386 #if DIAG_ALL_PMCS
387 		int i;
388 
389 		for (i = 0; i < 4; i++) {
390 			cdp->cpu_gpmcs[i] = read_pmc(i);
391 		}
392 #endif /* DIAG_ALL_PMCS */
393 #if !CONFIG_CPU_COUNTERS
394 		cdp->cpu_cur_insns = insns;
395 		cdp->cpu_cur_ucc = ucc;
396 		cdp->cpu_cur_urc = urc;
397 #endif /* !CONFIG_CPU_COUNTERS */
398 	}
399 }
400 
401 void
cpu_pmc_control(void * enablep)402 cpu_pmc_control(void *enablep)
403 {
404 #if !CONFIG_CPU_COUNTERS
405 	boolean_t enable = *(boolean_t *)enablep;
406 	cpu_data_t      *cdp = current_cpu_datap();
407 
408 	if (enable) {
409 		wrmsr64(0x38F, 0x70000000FULL);
410 		wrmsr64(0x38D, 0x333);
411 		set_cr4(get_cr4() | CR4_PCE);
412 	} else {
413 		wrmsr64(0x38F, 0);
414 		wrmsr64(0x38D, 0);
415 		set_cr4((get_cr4() & ~CR4_PCE));
416 	}
417 	cdp->cpu_fixed_pmcs_enabled = enable;
418 #else /* !CONFIG_CPU_COUNTERS */
419 #pragma unused(enablep)
420 #endif /* CONFIG_CPU_COUNTERS */
421 }
422