xref: /xnu-10002.61.3/osfmk/i386/proc_reg.h (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /* CMU_ENDHIST */
32 /*
33  * Mach Operating System
34  * Copyright (c) 1991,1990 Carnegie Mellon University
35  * All Rights Reserved.
36  *
37  * Permission to use, copy, modify and distribute this software and its
38  * documentation is hereby granted, provided that both the copyright
39  * notice and this permission notice appear in all copies of the
40  * software, derivative works or modified versions, and any portions
41  * thereof, and that both notices appear in supporting documentation.
42  *
43  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
44  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
45  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46  *
47  * Carnegie Mellon requests users of this software to return to
48  *
49  *  Software Distribution Coordinator  or  [email protected]
50  *  School of Computer Science
51  *  Carnegie Mellon University
52  *  Pittsburgh PA 15213-3890
53  *
54  * any improvements or extensions that they make and grant Carnegie Mellon
55  * the rights to redistribute these changes.
56  */
57 
58 /*
59  */
60 
61 /*
62  * Processor registers for i386 and i486.
63  */
64 #ifndef _I386_PROC_REG_H_
65 #define _I386_PROC_REG_H_
66 
67 /*
68  * Model Specific Registers
69  */
70 #define MSR_P5_TSC              0x10    /* Time Stamp Register */
71 #define MSR_P5_CESR             0x11    /* Control and Event Select Register */
72 #define MSR_P5_CTR0             0x12    /* Counter #0 */
73 #define MSR_P5_CTR1             0x13    /* Counter #1 */
74 
75 #define MSR_P5_CESR_PC          0x0200  /* Pin Control */
76 #define MSR_P5_CESR_CC          0x01C0  /* Counter Control mask */
77 #define MSR_P5_CESR_ES          0x003F  /* Event Control mask */
78 
79 #define MSR_P5_CESR_SHIFT       16              /* Shift to get Counter 1 */
80 #define MSR_P5_CESR_MASK        (MSR_P5_CESR_PC|\
81 	                         MSR_P5_CESR_CC|\
82 	                         MSR_P5_CESR_ES) /* Mask Counter */
83 
84 #define MSR_P5_CESR_CC_CLOCK    0x0100  /* Clock Counting (otherwise Event) */
85 #define MSR_P5_CESR_CC_DISABLE  0x0000  /* Disable counter */
86 #define MSR_P5_CESR_CC_CPL012   0x0040  /* Count if the CPL == 0, 1, 2 */
87 #define MSR_P5_CESR_CC_CPL3     0x0080  /* Count if the CPL == 3 */
88 #define MSR_P5_CESR_CC_CPL      0x00C0  /* Count regardless of the CPL */
89 
90 #define MSR_P5_CESR_ES_DATA_READ       0x000000 /* Data Read */
91 #define MSR_P5_CESR_ES_DATA_WRITE      0x000001 /* Data Write */
92 #define MSR_P5_CESR_ES_DATA_RW         0x101000 /* Data Read or Write */
93 #define MSR_P5_CESR_ES_DATA_TLB_MISS   0x000010 /* Data TLB Miss */
94 #define MSR_P5_CESR_ES_DATA_READ_MISS  0x000011 /* Data Read Miss */
95 #define MSR_P5_CESR_ES_DATA_WRITE_MISS 0x000100 /* Data Write Miss */
96 #define MSR_P5_CESR_ES_DATA_RW_MISS    0x101001 /* Data Read or Write Miss */
97 #define MSR_P5_CESR_ES_HIT_EM          0x000101 /* Write (hit) to M|E state */
98 #define MSR_P5_CESR_ES_DATA_CACHE_WB   0x000110 /* Cache lines written back */
99 #define MSR_P5_CESR_ES_EXTERNAL_SNOOP  0x000111 /* External Snoop */
100 #define MSR_P5_CESR_ES_CACHE_SNOOP_HIT 0x001000 /* Data cache snoop hits */
101 #define MSR_P5_CESR_ES_MEM_ACCESS_PIPE 0x001001 /* Mem. access in both pipes */
102 #define MSR_P5_CESR_ES_BANK_CONFLICTS  0x001010 /* Bank conflicts */
103 #define MSR_P5_CESR_ES_MISALIGNED      0x001011 /* Misaligned Memory or I/O */
104 #define MSR_P5_CESR_ES_CODE_READ       0x001100 /* Code Read */
105 #define MSR_P5_CESR_ES_CODE_TLB_MISS   0x001101 /* Code TLB miss */
106 #define MSR_P5_CESR_ES_CODE_CACHE_MISS 0x001110 /* Code Cache miss */
107 #define MSR_P5_CESR_ES_SEGMENT_LOADED  0x001111 /* Any segment reg. loaded */
108 #define MSR_P5_CESR_ES_BRANCHE         0x010010 /* Branches */
109 #define MSR_P5_CESR_ES_BTB_HIT         0x010011 /* BTB Hits */
110 #define MSR_P5_CESR_ES_BRANCHE_BTB     0x010100 /* Taken branch or BTB Hit */
111 #define MSR_P5_CESR_ES_PIPELINE_FLUSH  0x010101 /* Pipeline Flushes */
112 #define MSR_P5_CESR_ES_INSTRUCTION     0x010110 /* Instruction executed */
113 #define MSR_P5_CESR_ES_INSTRUCTION_V   0x010111 /* Inst. executed (v-pipe) */
114 #define MSR_P5_CESR_ES_BUS_CYCLE       0x011000 /* Clocks while bus cycle */
115 #define MSR_P5_CESR_ES_FULL_WRITE_BUF  0x011001 /* Clocks while full wrt buf. */
116 #define MSR_P5_CESR_ES_DATA_MEM_READ   0x011010 /* Pipeline waiting for read */
117 #define MSR_P5_CESR_ES_WRITE_EM        0x011011 /* Stall on write E|M state */
118 #define MSR_P5_CESR_ES_LOCKED_CYCLE    0x011100 /* Locked bus cycles */
119 #define MSR_P5_CESR_ES_IO_CYCLE        0x011101 /* I/O Read or Write cycles */
120 #define MSR_P5_CESR_ES_NON_CACHEABLE   0x011110 /* Non-cacheable Mem. read */
121 #define MSR_P5_CESR_ES_AGI             0x011111 /* Stall because of AGI */
122 #define MSR_P5_CESR_ES_FLOP            0x100010 /* Floating Point operations */
123 #define MSR_P5_CESR_ES_BREAK_DR0       0x100011 /* Breakpoint matches on DR0 */
124 #define MSR_P5_CESR_ES_BREAK_DR1       0x100100 /* Breakpoint matches on DR1 */
125 #define MSR_P5_CESR_ES_BREAK_DR2       0x100101 /* Breakpoint matches on DR2 */
126 #define MSR_P5_CESR_ES_BREAK_DR3       0x100110 /* Breakpoint matches on DR3 */
127 #define MSR_P5_CESR_ES_HARDWARE_IT     0x100111 /* Hardware interrupts */
128 
129 /*
130  * CR0
131  */
132 #define CR0_PG  0x80000000      /*	 Enable paging */
133 #define CR0_CD  0x40000000      /* i486: Cache disable */
134 #define CR0_NW  0x20000000      /* i486: No write-through */
135 #define CR0_AM  0x00040000      /* i486: Alignment check mask */
136 #define CR0_WP  0x00010000      /* i486: Write-protect kernel access */
137 #define CR0_NE  0x00000020      /* i486: Handle numeric exceptions */
138 #define CR0_ET  0x00000010      /*	 Extension type is 80387 */
139                                 /*	 (not official) */
140 #define CR0_TS  0x00000008      /*	 Task switch */
141 #define CR0_EM  0x00000004      /*	 Emulate coprocessor */
142 #define CR0_MP  0x00000002      /*	 Monitor coprocessor */
143 #define CR0_PE  0x00000001      /*	 Enable protected mode */
144 
145 /*
146  * CR4
147  */
148 #define CR4_SEE         0x00008000      /* Secure Enclave Enable XXX */
149 #define CR4_SMAP        0x00200000      /* Supervisor-Mode Access Protect */
150 #define CR4_SMEP        0x00100000      /* Supervisor-Mode Execute Protect */
151 #define CR4_OSXSAVE     0x00040000      /* OS supports XSAVE */
152 #define CR4_PCIDE       0x00020000      /* PCID Enable */
153 #define CR4_RDWRFSGS    0x00010000      /* RDWRFSGS Enable */
154 #define CR4_SMXE        0x00004000      /* Enable SMX operation */
155 #define CR4_VMXE        0x00002000      /* Enable VMX operation */
156 #define CR4_OSXMM       0x00000400      /* SSE/SSE2 exception support in OS */
157 #define CR4_OSFXS       0x00000200      /* SSE/SSE2 OS supports FXSave */
158 #define CR4_PCE         0x00000100      /* Performance-Monitor Count Enable */
159 #define CR4_PGE         0x00000080      /* Page Global Enable */
160 #define CR4_MCE         0x00000040      /* Machine Check Exceptions */
161 #define CR4_PAE         0x00000020      /* Physical Address Extensions */
162 #define CR4_PSE         0x00000010      /* Page Size Extensions */
163 #define CR4_DE          0x00000008      /* Debugging Extensions */
164 #define CR4_TSD         0x00000004      /* Time Stamp Disable */
165 #define CR4_PVI         0x00000002      /* Protected-mode Virtual Interrupts */
166 #define CR4_VME         0x00000001      /* Virtual-8086 Mode Extensions */
167 
168 /*
169  * XCR0 - XFEATURE_ENABLED_MASK (a.k.a. XFEM) register
170  */
171 #define XCR0_X87        (1ULL << 0)     /* x87, FPU/MMX (always set) */
172 #define XCR0_SSE        (1ULL << 1)     /* SSE supported by XSAVE/XRESTORE */
173 #define XCR0_YMM        (1ULL << 2)     /* YMM state available */
174 #define XCR0_BNDREGS    (1ULL << 3)     /* MPX Bounds register state */
175 #define XCR0_BNDCSR     (1ULL << 4)     /* MPX Bounds configuration/state  */
176 #define XCR0_OPMASK     (1ULL << 5)     /* Opmask register state */
177 #define XCR0_ZMM_HI256  (1ULL << 6)     /* ZMM upper 256-bit state */
178 #define XCR0_HI16_ZMM   (1ULL << 7)     /* ZMM16..ZMM31 512-bit state */
179 #define XFEM_X87        XCR0_X87
180 #define XFEM_SSE        XCR0_SSE
181 #define XFEM_YMM        XCR0_YMM
182 #define XFEM_BNDREGS    XCR0_BNDREGS
183 #define XFEM_BNDCSR     XCR0_BNDCSR
184 #define XFEM_OPMASK     XCR0_OPMASK
185 #define XFEM_ZMM_HI256  XCR0_ZMM_HI256
186 #define XFEM_HI16_ZMM   XCR0_HI16_ZMM
187 #define XFEM_ZMM_OPMASK (XFEM_ZMM_HI256 | XFEM_HI16_ZMM | XFEM_OPMASK)
188 /* Legacy name for Hypervisor.  Remove once it has updated. rdar://85833887&85613709 */
189 #define XFEM_ZMM        XFEM_ZMM_OPMASK
190 #define XCR0 (0)
191 
192 #define PMAP_PCID_PRESERVE (1ULL << 63)
193 #define PMAP_PCID_MASK (0xFFF)
194 
195 #define EARLY_GSBASE_MAGIC 0xffffdeadbeefee00
196 
197 /*
198  * If thread groups are needed for x86, set this to 1
199  */
200 #define CONFIG_THREAD_GROUPS 0
201 #define MAX_CPUS 64
202 #define MAX_PSETS 64
203 
204 #ifndef ASSEMBLER
205 
206 #include <sys/cdefs.h>
207 #include <stdint.h>
208 
209 __BEGIN_DECLS
210 
211 #define set_ts() set_cr0(get_cr0() | CR0_TS)
212 
/* Read the %es segment selector register. */
static inline uint16_t
get_es(void)
{
	uint16_t es;
	__asm__ volatile ("mov %%es, %0" : "=r" (es));
	return es;
}
220 
/* Load the %es segment selector; an invalid selector faults (#GP). */
static inline void
set_es(uint16_t es)
{
	__asm__ volatile ("mov %0, %%es" : : "r" (es));
}
226 
/* Read the %ds segment selector register. */
static inline uint16_t
get_ds(void)
{
	uint16_t ds;
	__asm__ volatile ("mov %%ds, %0" : "=r" (ds));
	return ds;
}
234 
/* Load the %ds segment selector; an invalid selector faults (#GP). */
static inline void
set_ds(uint16_t ds)
{
	__asm__ volatile ("mov %0, %%ds" : : "r" (ds));
}
240 
/* Read the %fs segment selector register (selector only; the fs base lives in an MSR on x86_64). */
static inline uint16_t
get_fs(void)
{
	uint16_t fs;
	__asm__ volatile ("mov %%fs, %0" : "=r" (fs));
	return fs;
}
248 
/* Load the %fs segment selector (does not change the fs base MSR). */
static inline void
set_fs(uint16_t fs)
{
	__asm__ volatile ("mov %0, %%fs" : : "r" (fs));
}
254 
/* Read the %gs segment selector register (selector only; the gs base lives in an MSR on x86_64). */
static inline uint16_t
get_gs(void)
{
	uint16_t gs;
	__asm__ volatile ("mov %%gs, %0" : "=r" (gs));
	return gs;
}
262 
/* Load the %gs segment selector (does not change the gs base MSR). */
static inline void
set_gs(uint16_t gs)
{
	__asm__ volatile ("mov %0, %%gs" : : "r" (gs));
}
268 
/* Read the %ss (stack segment) selector register. */
static inline uint16_t
get_ss(void)
{
	uint16_t ss;
	__asm__ volatile ("mov %%ss, %0" : "=r" (ss));
	return ss;
}
276 
/* Load the %ss (stack segment) selector; an invalid selector faults (#GP). */
static inline void
set_ss(uint16_t ss)
{
	__asm__ volatile ("mov %0, %%ss" : : "r" (ss));
}
282 
/* Read control register CR0 (privileged; kernel mode only). */
static inline uintptr_t
get_cr0(void)
{
	uintptr_t cr0;
	__asm__ volatile ("mov %%cr0, %0" : "=r" (cr0));
	return cr0;
}
290 
/* Write control register CR0 (privileged); see the CR0_* bit definitions above. */
static inline void
set_cr0(uintptr_t value)
{
	__asm__ volatile ("mov %0, %%cr0" : : "r" (value));
}
296 
/* Read CR2: the faulting linear address recorded by the most recent page fault. */
static inline uintptr_t
get_cr2(void)
{
	uintptr_t cr2;
	__asm__ volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}
304 
/* Read CR3 verbatim, including any PCID/flag bits in the low 12 bits (cf. get_cr3_base). */
static inline uintptr_t
get_cr3_raw(void)
{
	uintptr_t cr3;
	__asm__ volatile ("mov %%cr3, %0" : "=r" (cr3));
	return cr3;
}
312 
/* Write CR3 verbatim; the caller is responsible for composing base/PCID/flag bits. */
static inline void
set_cr3_raw(uintptr_t value)
{
	__asm__ volatile ("mov %0, %%cr3" : : "r" (value));
}
318 
/* Read CR3 with the low 12 bits (PCID/flags) masked off, yielding the page-table base address. */
static inline uintptr_t
get_cr3_base(void)
{
	uintptr_t cr3;
	__asm__ volatile ("mov %%cr3, %0" : "=r" (cr3));
	return cr3 & ~(0xFFFULL);
}
326 
/*
 * Compose and load CR3 from a page-table base, a PCID, and a preserve flag.
 * 'preserve' must be 0 or 1: it is shifted into bit 63 (the CR3 bit that,
 * when set, avoids flushing the TLB entries tagged with this PCID).
 * NOTE(review): no masking is applied — caller must pass a page-aligned
 * base and a pcid within PMAP_PCID_MASK.
 */
static inline void
set_cr3_composed(uintptr_t base, uint16_t pcid, uint64_t preserve)
{
	__asm__ volatile ("mov %0, %%cr3" : : "r" (base | pcid | ( (preserve) << 63) ));
}
332 
/* Read control register CR4 (privileged); see the CR4_* bit definitions above. */
static inline uintptr_t
get_cr4(void)
{
	uintptr_t cr4;
	__asm__ volatile ("mov %%cr4, %0" : "=r" (cr4));
	return cr4;
}
340 
/* Write control register CR4 (privileged). */
static inline void
set_cr4(uintptr_t value)
{
	__asm__ volatile ("mov %0, %%cr4" : : "r" (value));
}
346 
/* Return the current RFLAGS/EFLAGS value, obtained via pushf/pop. */
static inline uintptr_t
x86_get_flags(void)
{
	uintptr_t erflags;
	__asm__ volatile ("pushf; pop	%0"  :  "=r" (erflags));
	return erflags;
}
354 
/* Clear CR0.TS via CLTS, re-enabling FPU/SSE use without a #NM trap (cf. set_ts()). */
static inline void
clear_ts(void)
{
	__asm__ volatile ("clts");
}
360 
/* Read the task register (TR) selector via STR. */
static inline unsigned short
get_tr(void)
{
	unsigned short seg;
	__asm__ volatile ("str %0" : "=rm" (seg));
	return seg;
}
368 
/* Load the task register with selector 'seg' via LTR (privileged). */
static inline void
set_tr(unsigned int seg)
{
	__asm__ volatile ("ltr %0" : : "rm" ((unsigned short)(seg)));
}
374 
/* Read the local descriptor table register (LDTR) selector via SLDT. */
static inline unsigned short
sldt(void)
{
	unsigned short seg;
	__asm__ volatile ("sldt %0" : "=rm" (seg));
	return seg;
}
382 
/* Load the local descriptor table register with selector 'seg' via LLDT (privileged). */
static inline void
lldt(unsigned int seg)
{
	__asm__ volatile ("lldt %0" : : "rm" ((unsigned short)(seg)));
}
388 
/*
 * Load the GDTR from the pseudo-descriptor at 'desc' (16-bit limit followed
 * by the linear base address); typed uintptr_t* here only for convenience.
 */
static inline void
lgdt(uintptr_t *desc)
{
	__asm__ volatile ("lgdt %0" : : "m" (*desc));
}
394 
/*
 * Load the IDTR from the pseudo-descriptor at 'desc' (16-bit limit followed
 * by the linear base address); typed uintptr_t* here only for convenience.
 */
static inline void
lidt(uintptr_t *desc)
{
	__asm__ volatile ("lidt %0" : : "m" (*desc));
}
400 
/* Exchange the GS base with the kernel GS base MSR (SWAPGS; privileged). */
static inline void
swapgs(void)
{
	__asm__ volatile ("swapgs");
}
406 
/* Halt the CPU until the next interrupt (HLT; privileged). */
static inline void
hlt(void)
{
	__asm__ volatile ("hlt");
}
412 
413 #ifdef MACH_KERNEL_PRIVATE
414 
415 extern int rdmsr64_carefully(uint32_t msr, uint64_t *val);
416 extern int wrmsr64_carefully(uint32_t msr, uint64_t val);
417 #endif  /* MACH_KERNEL_PRIVATE */
418 
/* Write back and invalidate all processor caches (WBINVD; privileged, very expensive). */
static inline void
wbinvd(void)
{
	__asm__ volatile ("wbinvd");
}
424 
/*
 * Invalidate the TLB entry for the page containing 'addr' (INVLPG; privileged).
 * The "memory" clobber keeps the compiler from reordering memory accesses
 * across the invalidation.
 */
static inline void
invlpg(uintptr_t addr)
{
	__asm__  volatile ("invlpg (%0)" :: "r" (addr) : "memory");
}
430 
/* Clear RFLAGS.AC (CLAC): re-arm SMAP so kernel accesses to user pages fault. */
static inline void
clac(void)
{
	__asm__  volatile ("clac");
}
436 
/* Set RFLAGS.AC (STAC): temporarily permit kernel accesses to user pages under SMAP. */
static inline void
stac(void)
{
	__asm__  volatile ("stac");
}
442 
443 /*
444  * Access to machine-specific registers (available on 586 and better only)
445  * Note: the rd* operations modify the parameters directly (without using
446  * pointer indirection), this allows gcc to optimize better
447  */
448 
449 #define rdmsr(msr, lo, hi) \
450 	__asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr))
451 
452 #define wrmsr(msr, lo, hi) \
453 	__asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi))
454 
455 #define rdtsc(lo, hi) \
456 	__asm__ volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi))
457 
458 #define rdtsc_nofence(lo, hi) \
459 	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi))
460 
461 #define write_tsc(lo, hi) wrmsr(0x10, lo, hi)
462 
463 #define rdpmc(counter, lo, hi) \
464 	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter))
465 
466 #ifdef XNU_KERNEL_PRIVATE
467 
468 #define X86_MAX_LBRS    32
/* One last-branch-record entry as captured from the LBR MSR stack. */
struct x86_lbr_record {
	/*
	 * Note that some CPUs convey extra info in the upper bits of the from/to fields,
	 * whereas others convey that information in the LBR_INFO companion MSRs.
	 * The proper info will be extracted based on the CPU family detected at runtime
	 * when LBR thread state is requested.
	 */
	uint64_t        from_rip;       /* branch source RIP (may carry flag bits in the upper bits; see note) */
	uint64_t        to_rip;         /* branch destination RIP */
	uint64_t        info;           /* companion LBR_INFO MSR contents, on CPUs that provide it */
};
480 
/* Snapshot of a thread's full last-branch-record stack. */
typedef struct x86_lbrs {
	uint64_t                lbr_tos;        /* top-of-stack index — presumably MSR_IA32_LASTBRANCH_TOS; confirm at capture site */
	struct x86_lbr_record   lbrs[X86_MAX_LBRS];     /* the LBR entries themselves */
} x86_lbrs_t;
485 
486 
487 extern void do_mfence(void);
488 #define mfence() do_mfence()
489 #endif
490 
491 #ifdef __LP64__
/*
 * Read performance-monitoring counter 'pmc' and return its full 64-bit
 * value; RDPMC delivers the halves in EAX (low) and EDX (high).
 */
static inline uint64_t
rdpmc64(uint32_t pmc)
{
	uint32_t ctr_lo = 0;
	uint32_t ctr_hi = 0;

	rdpmc(pmc, ctr_lo, ctr_hi);
	return ((uint64_t)ctr_hi << 32) | (uint64_t)ctr_lo;
}
499 
/*
 * Read MSR 'msr' and return its full 64-bit value; RDMSR delivers the
 * halves in EAX (low) and EDX (high).
 */
static inline uint64_t
rdmsr64(uint32_t msr)
{
	uint32_t val_lo = 0;
	uint32_t val_hi = 0;

	rdmsr(msr, val_lo, val_hi);
	return ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
}
507 
/*
 * Write the 64-bit 'val' to MSR 'msr', split into the EAX (low) and
 * EDX (high) halves that WRMSR expects.
 */
static inline void
wrmsr64(uint32_t msr, uint64_t val)
{
	uint64_t val_lo = val & 0xFFFFFFFFUL;
	uint64_t val_hi = (val >> 32) & 0xFFFFFFFFUL;

	wrmsr(msr, val_lo, val_hi);
}
513 
/*
 * Read the time-stamp counter as one 64-bit value, with the LFENCE in
 * rdtsc() serializing earlier loads ahead of the read.
 */
static inline uint64_t
rdtsc64(void)
{
	uint64_t tsc_lo;
	uint64_t tsc_hi;

	rdtsc(tsc_lo, tsc_hi);
	return (tsc_hi << 32) | tsc_lo;
}
521 
/*
 * Read the time-stamp counter as one 64-bit value WITHOUT a leading
 * fence, so the read may be reordered with surrounding instructions.
 */
static inline uint64_t
rdtsc64_nofence(void)
{
	uint64_t tsc_lo;
	uint64_t tsc_hi;

	rdtsc_nofence(tsc_lo, tsc_hi);
	return (tsc_hi << 32) | tsc_lo;
}
529 
/*
 * Read the time-stamp counter via RDTSCP, returning the 64-bit TSC and
 * storing the IA32_TSC_AUX value (delivered in ECX) through 'aux'.
 *
 * BUG FIX: the previous asm template was "rdtscp; mov %%ecx, %1", but
 * operand %1 was the "=d"(hi) output — the mov clobbered the high half
 * of the timestamp with TSC_AUX, and the "=m"(*aux) operand (%2) was
 * never written, leaving *aux uninitialized.  Returning TSC_AUX through
 * a "=c" output lets the compiler do the store and removes the manual
 * mov and the "ecx" clobber entirely.
 */
static inline uint64_t
rdtscp64(uint32_t *aux)
{
	uint32_t lo, hi, tsc_aux;
	__asm__ volatile ("rdtscp"
	    : "=a" (lo), "=d" (hi), "=c" (tsc_aux));
	*aux = tsc_aux;
	return (((uint64_t)hi) << 32) | (uint64_t)lo;
}
540 #endif /* __LP64__ */
541 
542 /*
543  * rdmsr_carefully() returns 0 when the MSR has been read successfully,
544  * or non-zero (1) if the MSR does not exist.
545  * The implementation is in locore.s.
546  */
547 extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
548 __END_DECLS
549 
550 #endif  /* ASSEMBLER */
551 
552 #define MSR_IA32_P5_MC_ADDR                     0
553 #define MSR_IA32_P5_MC_TYPE                     1
554 #define MSR_IA32_PLATFORM_ID                    0x17
555 #define MSR_IA32_EBL_CR_POWERON                 0x2a
556 
557 #define MSR_IA32_APIC_BASE                      0x1b
558 #define     MSR_IA32_APIC_BASE_BSP                  (1<<8)
559 #define     MSR_IA32_APIC_BASE_EXTENDED             (1<<10)
560 #define     MSR_IA32_APIC_BASE_ENABLE               (1<<11)
561 #define     MSR_IA32_APIC_BASE_BASE                 (0xfffff<<12)
562 
563 #define MSR_CORE_THREAD_COUNT                   0x35
564 
565 #define MSR_IA32_FEATURE_CONTROL                0x3a
566 #define     MSR_IA32_FEATCTL_LOCK                   (1<<0)
567 #define     MSR_IA32_FEATCTL_VMXON_SMX              (1<<1)
568 #define     MSR_IA32_FEATCTL_VMXON                  (1<<2)
569 #define     MSR_IA32_FEATCTL_CSTATE_SMI             (1<<16)
570 
571 #define MSR_IA32_UPDT_TRIG                      0x79
572 #define MSR_IA32_BIOS_SIGN_ID                   0x8b
573 #define MSR_IA32_UCODE_WRITE                    MSR_IA32_UPDT_TRIG
574 #define MSR_IA32_UCODE_REV                      MSR_IA32_BIOS_SIGN_ID
575 
576 #define MSR_IA32_PERFCTR0                       0xc1
577 #define MSR_IA32_PERFCTR1                       0xc2
578 #define MSR_IA32_PERFCTR3                       0xc3
579 #define MSR_IA32_PERFCTR4                       0xc4
580 
581 #define MSR_PLATFORM_INFO                       0xce
582 
583 #define MSR_IA32_MPERF                          0xE7
584 #define MSR_IA32_APERF                          0xE8
585 
586 #define MSR_IA32_ARCH_CAPABILITIES              0x10a
587 #define         MSR_IA32_ARCH_CAPABILITIES_RDCL_NO      (1ULL << 0)
588 #define         MSR_IA32_ARCH_CAPABILITIES_IBRS_ALL     (1ULL << 1)
589 #define         MSR_IA32_ARCH_CAPABILITIES_RSBA         (1ULL << 2)
590 #define         MSR_IA32_ARCH_CAPABILITIES_L1DF_NO      (1ULL << 3)
591 #define         MSR_IA32_ARCH_CAPABILITIES_SSB_NO       (1ULL << 4)
592 #define         MSR_IA32_ARCH_CAPABILITIES_MDS_NO       (1ULL << 5)
593 #define         MSR_IA32_ARCH_CAPABILITIES_IFU_NO       (1ULL << 6)     /* This CPU is not susceptible to the instruction-fetch erratum */
594 #define         MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL     (1ULL << 7)     /* This CPU supports the TSX_CTRL MSR */
595 #define         MSR_IA32_ARCH_CAPABILITIES_TAA_NO       (1ULL << 8)     /* This CPU is not susceptible to TAA */
596 
597 #define MSR_IA32_TSX_FORCE_ABORT                0x10f
598 #define         MSR_IA32_TSXFA_RTM_FORCE_ABORT  (1ULL << 0)     /* Bit 0 */
599 
600 #define MSR_IA32_BBL_CR_CTL                     0x119
601 
602 #define MSR_IA32_TSX_CTRL                       0x122
603 #define         MSR_IA32_TSXCTRL_RTM_DISABLE            (1ULL << 0)     /* Bit 0 */
604 #define         MSR_IA32_TSXCTRL_TSX_CPU_CLEAR          (1ULL << 1)     /* Bit 1 */
605 
606 #define MSR_IA32_MCU_OPT_CTRL                   0x123
607 #define         MSR_IA32_MCUOPTCTRL_RNGDS_MITG_DIS      (1ULL << 0)     /* Bit 0 */
608 
609 #define MSR_IA32_SYSENTER_CS                    0x174
610 #define MSR_IA32_SYSENTER_ESP                   0x175
611 #define MSR_IA32_SYSENTER_EIP                   0x176
612 
613 #define MSR_IA32_MCG_CAP                        0x179
614 #define MSR_IA32_MCG_STATUS                     0x17a
615 #define MSR_IA32_MCG_CTL                        0x17b
616 
617 #define MSR_IA32_EVNTSEL0                       0x186
618 #define MSR_IA32_EVNTSEL1                       0x187
619 #define MSR_IA32_EVNTSEL2                       0x188
620 #define MSR_IA32_EVNTSEL3                       0x189
621 
622 #define MSR_FLEX_RATIO                          0x194
623 #define MSR_IA32_PERF_STS                       0x198
624 #define MSR_IA32_PERF_CTL                       0x199
625 #define MSR_IA32_CLOCK_MODULATION               0x19a
626 
627 #define MSR_IA32_MISC_ENABLE                    0x1a0
628 #define MSR_IA32_PACKAGE_THERM_STATUS           0x1b1
629 #define MSR_IA32_PACKAGE_THERM_INTERRUPT        0x1b2
630 
631 #define MSR_IA32_LBR_SELECT                     0x1c8
632 #define         LBR_SELECT_CPL_EQ_0             (1ULL)          /* R/W When set, do not capture branches ending in ring 0 */
633 #define         LBR_SELECT_CPL_NEQ_0            (1ULL << 1)     /* R/W When set, do not capture branches ending in ring >0 */
634 #define         LBR_SELECT_JCC                  (1ULL << 2)     /* R/W When set, do not capture conditional branches */
635 #define         LBR_SELECT_NEAR_REL_CALL        (1ULL << 3)     /* R/W When set, do not capture near relative calls */
636 #define         LBR_SELECT_NEAR_IND_CALL        (1ULL << 4)     /* R/W When set, do not capture near indirect calls */
637 #define         LBR_SELECT_NEAR_RET             (1ULL << 5)     /* R/W When set, do not capture near returns */
638 #define         LBR_SELECT_NEAR_IND_JMP         (1ULL << 6)     /* R/W When set, do not capture near indirect jumps except near indirect calls and near returns */
639 #define         LBR_SELECT_NEAR_REL_JMP         (1ULL << 7)     /* R/W When set, do not capture near relative jumps except near relative calls. */
640 #define         LBR_SELECT_FAR_BRANCH           (1ULL << 8)     /* R/W When set, do not capture far branches */
641 #define         LBR_SELECT_HSW_EN_CALLSTACK1    (1ULL << 9)     /* Enable LBR stack to use LIFO filtering to capture Call stack profile */
642 
643 #define MSR_IA32_LASTBRANCH_TOS                 0x1c9
644 
645 /* LBR INFO MSR fields (SKL and later) */
646 /* Same fields can be used for HSW in the FROM_x LBR MSRs */
647 #define MSR_IA32_LBRINFO_TSX_ABORT              (1ULL << 61)
648 #define MSR_IA32_LBRINFO_IN_TSX                 (1ULL << 62)
649 #define MSR_IA32_LBRINFO_MISPREDICT             (1ULL << 63)
650 #define MSR_IA32_LBRINFO_CYCLECNT_MASK          (0xFFFFULL)
651 
652 #define MSR_IA32_DEBUGCTLMSR                    0x1d9
653 #define         DEBUGCTL_LBR_ENA                (1U)
654 
655 #define MSR_IA32_LASTBRANCHFROMIP               0x1db
656 #define MSR_IA32_LASTBRANCHTOIP                 0x1dc
657 #define MSR_IA32_LASTINTFROMIP                  0x1dd
658 #define MSR_IA32_LASTINTTOIP                    0x1de
659 
660 #define MSR_IA32_CR_PAT                         0x277
661 
662 #define MSR_IA32_MTRRCAP                        0xfe
663 #define MSR_IA32_MTRR_DEF_TYPE                  0x2ff
664 #define MSR_IA32_MTRR_PHYSBASE(n)               (0x200 + 2*(n))
665 #define MSR_IA32_MTRR_PHYSMASK(n)               (0x200 + 2*(n) + 1)
666 #define MSR_IA32_MTRR_FIX64K_00000              0x250
667 #define MSR_IA32_MTRR_FIX16K_80000              0x258
668 #define MSR_IA32_MTRR_FIX16K_A0000              0x259
669 #define MSR_IA32_MTRR_FIX4K_C0000               0x268
670 #define MSR_IA32_MTRR_FIX4K_C8000               0x269
671 #define MSR_IA32_MTRR_FIX4K_D0000               0x26a
672 #define MSR_IA32_MTRR_FIX4K_D8000               0x26b
673 #define MSR_IA32_MTRR_FIX4K_E0000               0x26c
674 #define MSR_IA32_MTRR_FIX4K_E8000               0x26d
675 #define MSR_IA32_MTRR_FIX4K_F0000               0x26e
676 #define MSR_IA32_MTRR_FIX4K_F8000               0x26f
677 
678 #define MSR_IA32_PERF_FIXED_CTR0                0x309
679 
680 #define MSR_IA32_PERF_CAPABILITIES              0x345
681 #define         PERFCAP_LBR_FMT_MASK            (0x3f)
682 #define PERFCAP_LBR_TYPE(msrval) ((msrval) & PERFCAP_LBR_FMT_MASK)
683 #define PERFCAP_LBR_TYPE_MISPRED                3       /* NHM */
684 #define PERFCAP_LBR_TYPE_TSXINFO                4       /* HSW/BDW */
685 #define PERFCAP_LBR_TYPE_EIP_WITH_LBRINFO       5       /* SKL+ */
686 /* Types 6 & 7 are for Goldmont and Goldmont Plus, respectively */
687 
688 #define LBR_TYPE_MISPRED_FROMRIP(from_rip)      (((from_rip) & 0xFFFFFFFFFFFFULL) | (((from_rip) & (1ULL << 47)) ? 0xFFFF000000000000ULL : 0))
689 #define LBR_TYPE_MISPRED_MISPREDICT(from_rip)   (((from_rip) & MSR_IA32_LBRINFO_MISPREDICT) ? 1 : 0)
690 
691 #define LBR_TYPE_TSXINFO_FROMRIP(from_rip)      (LBR_TYPE_MISPRED_FROMRIP(from_rip))
692 #define LBR_TYPE_TSXINFO_MISPREDICT(from_rip)   (((from_rip) & MSR_IA32_LBRINFO_MISPREDICT) ? 1 : 0)
693 #define LBR_TYPE_TSXINFO_TSX_ABORT(from_rip)    (((from_rip) & MSR_IA32_LBRINFO_TSX_ABORT) ? 1 : 0)
694 #define LBR_TYPE_TSXINFO_IN_TSX(from_rip)       (((from_rip) & MSR_IA32_LBRINFO_IN_TSX) ? 1 : 0)
695 
696 #define LBR_TYPE_EIP_WITH_LBRINFO_MISPREDICT(lbrinfo)   LBR_TYPE_TSXINFO_MISPREDICT(lbrinfo)
697 #define LBR_TYPE_EIP_WITH_LBRINFO_TSX_ABORT(lbrinfo)    LBR_TYPE_TSXINFO_TSX_ABORT(lbrinfo)
698 #define LBR_TYPE_EIP_WITH_LBRINFO_IN_TSX(lbrinfo)       LBR_TYPE_TSXINFO_IN_TSX(lbrinfo)
699 #define LBR_TYPE_EIP_WITH_LBRINFO_CYC_COUNT(lbrinfo)    ((lbrinfo) & 0xFFFFULL)
700 
701 
702 #define MSR_IA32_PERF_FIXED_CTR_CTRL            0x38D
703 #define MSR_IA32_PERF_GLOBAL_STATUS             0x38E
704 #define MSR_IA32_PERF_GLOBAL_CTRL               0x38F
705 #define MSR_IA32_PERF_GLOBAL_OVF_CTRL   0x390
706 
707 #define MSR_IA32_PKG_C3_RESIDENCY               0x3F8
708 #define MSR_IA32_PKG_C6_RESIDENCY               0x3F9
709 #define MSR_IA32_PKG_C7_RESIDENCY               0x3FA
710 
711 #define MSR_IA32_CORE_C3_RESIDENCY              0x3FC
712 #define MSR_IA32_CORE_C6_RESIDENCY              0x3FD
713 #define MSR_IA32_CORE_C7_RESIDENCY              0x3FE
714 
715 #define MSR_IA32_MC0_CTL                        0x400
716 #define MSR_IA32_MC0_STATUS                     0x401
717 #define MSR_IA32_MC0_ADDR                       0x402
718 #define MSR_IA32_MC0_MISC                       0x403
719 
720 #define MSR_IA32_VMX_BASE                                       0x480
721 #define MSR_IA32_VMX_BASIC                                      MSR_IA32_VMX_BASE
722 #define MSR_IA32_VMX_PINBASED_CTLS                      MSR_IA32_VMX_BASE+1
723 #define MSR_IA32_VMX_PROCBASED_CTLS                     MSR_IA32_VMX_BASE+2
724 #define MSR_IA32_VMX_EXIT_CTLS                          MSR_IA32_VMX_BASE+3
725 #define MSR_IA32_VMX_ENTRY_CTLS                         MSR_IA32_VMX_BASE+4
726 #define MSR_IA32_VMX_MISC                                       MSR_IA32_VMX_BASE+5
727 #define MSR_IA32_VMX_CR0_FIXED0                         MSR_IA32_VMX_BASE+6
728 #define MSR_IA32_VMX_CR0_FIXED1                         MSR_IA32_VMX_BASE+7
729 #define MSR_IA32_VMX_CR4_FIXED0                         MSR_IA32_VMX_BASE+8
730 #define MSR_IA32_VMX_CR4_FIXED1                         MSR_IA32_VMX_BASE+9
731 #define MSR_IA32_VMX_VMCS_ENUM                          MSR_IA32_VMX_BASE+10
732 #define MSR_IA32_VMX_PROCBASED_CTLS2            MSR_IA32_VMX_BASE+11
733 #define MSR_IA32_VMX_EPT_VPID_CAP                       MSR_IA32_VMX_BASE+12
734 #define         MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT      21
735 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS         MSR_IA32_VMX_BASE+13
736 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS        MSR_IA32_VMX_BASE+14
737 #define MSR_IA32_VMX_TRUE_VMEXIT_CTLS           MSR_IA32_VMX_BASE+15
738 #define MSR_IA32_VMX_TRUE_VMENTRY_CTLS          MSR_IA32_VMX_BASE+16
739 #define MSR_IA32_VMX_VMFUNC                                     MSR_IA32_VMX_BASE+17
740 
741 #define MSR_IA32_DS_AREA                        0x600
742 
743 #define MSR_IA32_PKG_POWER_SKU_UNIT             0x606
744 #define MSR_IA32_PKG_C2_RESIDENCY               0x60D
745 #define MSR_IA32_PKG_ENERGY_STATUS              0x611
746 #define MSR_IA32_DDR_ENERGY_STATUS              0x619
747 #define MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER    0x61D
748 #define MSR_IA32_RING_PERF_STATUS               0x621
749 
750 #define MSR_IA32_PKG_C8_RESIDENCY               0x630
751 #define MSR_IA32_PKG_C9_RESIDENCY               0x631
752 #define MSR_IA32_PKG_C10_RESIDENCY              0x632
753 
754 #define MSR_IA32_PP0_ENERGY_STATUS              0x639
755 #define MSR_IA32_PP1_ENERGY_STATUS              0x641
756 #define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL      0x64F
757 
758 #define MSR_IA32_IA_PERF_LIMIT_REASONS          0x690
759 #define MSR_IA32_GT_PERF_LIMIT_REASONS          0x6B0
760 
761 #define MSR_IA32_TSC_DEADLINE                   0x6e0
762 
763 #define MSR_IA32_EFER                           0xC0000080
764 #define     MSR_IA32_EFER_SCE                       0x00000001
765 #define     MSR_IA32_EFER_LME                       0x00000100
766 #define     MSR_IA32_EFER_LMA                       0x00000400
767 #define     MSR_IA32_EFER_NXE                       0x00000800
768 
769 #define MSR_IA32_STAR                           0xC0000081
770 #define MSR_IA32_LSTAR                          0xC0000082
771 #define MSR_IA32_CSTAR                          0xC0000083
772 #define MSR_IA32_FMASK                          0xC0000084
773 
774 #define MSR_IA32_FS_BASE                        0xC0000100
775 #define MSR_IA32_GS_BASE                        0xC0000101
776 #define MSR_IA32_KERNEL_GS_BASE                 0xC0000102
777 #define MSR_IA32_TSC_AUX                        0xC0000103
778 
779 #define HV_VMX_EPTP_MEMORY_TYPE_UC              0x0
780 #define HV_VMX_EPTP_MEMORY_TYPE_WB              0x6
781 #define HV_VMX_EPTP_WALK_LENGTH(wl)             (0ULL | ((((wl) - 1) & 0x7) << 3))
782 #define HV_VMX_EPTP_ENABLE_AD_FLAGS             (1ULL << 6)
783 
784 #endif  /* _I386_PROC_REG_H_ */
785