/*
 * xref: /xnu-8020.140.41/tests/hvtest_x86_asm.s
 * (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
 *
 * Guest-side assembly for the x86 hypervisor framework tests.
 * AT&T syntax; assembled via cpp (#include below), hence C-style comments.
 */
#include <machine/asm.h>

	.text

	.balign 0x1000

	.global _hvtest_begin
_hvtest_begin:

	/*
	 * Everything between _hvtest_begin and _hvtest_end will be copied for
	 * tests that don't use the page faulting of the test harness.
	 * You can put constants here.
	 */

.code64

	.balign 16
20	.global _save_restore_regs_entry
21_save_restore_regs_entry:
22
23    pushq %rax
24    pushq %rcx
25
26    xor %rcx, %rcx
27
28    pushq %rbx
29
30
31    /*
32     * For all registers to test, each of these blocks:
33     * 1. increments rcx (to keep track in case of test failure),
34     * 2. checks the register's value against a (constant) template
35     * 3. flips all bits for the VMM to later verify that the changes value is available.
36     *
37     * For a second pass, bits are all flipped back to their original state after
38     * the vmcall.
39     */
40
41
42    // segment registers (pass 1)
43
44    incq %rcx
45    movq $0x1010, %rax
46    movq %ds, %rbx
47    cmpq %rbx, %rax
48    jne .foul
49    movq $1, %rbx
50    movq %rbx, %ds
51
52    incq %rcx
53    movq $0x2020, %rax
54    movq %es, %rbx
55    cmpq %rbx, %rax
56    jne .foul
57    movq $2, %rbx
58    movq %rbx, %es
59
60    incq %rcx
61    movq $0x3030, %rax
62    movq %fs, %rbx
63    cmpq %rbx, %rax
64    jne .foul
65    movq $3, %rbx
66    movq %rbx, %fs
67
68    incq %rcx
69    movq $0x4040, %rax
70    movq %gs, %rbx
71    cmpq %rbx, %rax
72    jne .foul
73    movq $1, %rbx
74    movq %rbx, %gs
75
76    popq %rbx
77
78    jmp .pass
79
80.pass2:
81    pushq %rax
82    pushq %rcx
83
84    xor %rcx, %rcx
85
86    pushq %rbx
87
88    // segment registers (pass 2)
89
90    incq %rcx
91    movq $0x1, %rax
92    movq %ds, %rbx
93    cmpq %rbx, %rax
94    jne .foul
95    movq $1, %rbx
96    movq %rbx, %ds
97
98    incq %rcx
99    movq $0x2, %rax
100    movq %es, %rbx
101    cmpq %rbx, %rax
102    jne .foul
103    movq $2, %rbx
104    movq %rbx, %es
105
106    incq %rcx
107    movq $0x3, %rax
108    movq %fs, %rbx
109    cmpq %rbx, %rax
110    jne .foul
111    movq $3, %rbx
112    movq %rbx, %fs
113
114    incq %rcx
115    movq $0x1, %rax
116    movq %gs, %rbx
117    cmpq %rbx, %rax
118    jne .foul
119    movq $1, %rbx
120    movq %rbx, %gs
121
122    popq %rbx
123
124.pass:
125    // general purpose registers
126
127    incq %rcx
128    movq $0x0101010101010101, %rax
129    cmpq 8(%rsp), %rax // %rax on stack
130    jne .foul
131    notq 8(%rsp)
132
133    incq %rcx
134    movq $0x0202020202020202, %rax
135    cmpq %rbx, %rax
136    jne .foul
137    notq %rbx
138
139    incq %rcx
140    movq $0x0303030303030303, %rax
141    cmpq (%rsp), %rax // %rcx on stack
142    jne .foul
143    notq (%rsp)
144
145    incq %rcx
146    movq $0x0404040404040404, %rax
147    cmpq %rdx, %rax
148    jne .foul
149    notq %rdx
150
151    incq %rcx
152    movq $0x0505050505050505, %rax
153    cmpq %rsi, %rax
154    jne .foul
155    notq %rsi
156
157    incq %rcx
158    movq $0x0606060606060606, %rax
159    cmpq %rdi, %rax
160    jne .foul
161    notq %rdi
162
163    incq %rcx
164    movq $0x0707070707070707, %rax
165    cmpq %rbp, %rax
166    jne .foul
167    notq %rbp
168
169    incq %rcx
170    movq $0x0808080808080808, %rax
171    cmpq %r8, %rax
172    jne .foul
173    notq %r8
174
175    incq %rcx
176    movq $0x0909090909090909, %rax
177    cmpq %r9, %rax
178    jne .foul
179    notq %r9
180
181    incq %rcx
182    movq $0x0a0a0a0a0a0a0a0a, %rax
183    cmpq %r10, %rax
184    jne .foul
185    notq %r10
186
187    incq %rcx
188    movq $0x0b0b0b0b0b0b0b0b, %rax
189    cmpq %r11, %rax
190    jne .foul
191    notq %r11
192
193    incq %rcx
194    movq $0x0c0c0c0c0c0c0c0c, %rax
195    cmpq %r12, %rax
196    jne .foul
197    notq %r12
198
199    incq %rcx
200    movq $0x0d0d0d0d0d0d0d0d, %rax
201    cmpq %r13, %rax
202    jne .foul
203    notq %r13
204
205    incq %rcx
206    movq $0x0e0e0e0e0e0e0e0e, %rax
207    cmpq %r14, %rax
208    jne .foul
209    notq %r14
210
211    incq %rcx
212    movq $0x0f0f0f0f0f0f0f0f, %rax
213    cmpq %r15, %rax
214    jne .foul
215    notq %r15
216
217    popq %rcx
218    movq (%rsp), %rax
219    vmcall
220
221    notq %rax
222    notq %rbx
223    notq %rcx
224    notq %rdx
225    notq %rsi
226    notq %rdi
227    notq %rbp
228    notq %r8
229    notq %r9
230    notq %r10
231    notq %r11
232    notq %r12
233    notq %r13
234    notq %r14
235    notq %r15
236
237    jmp .pass2
238
239.foul:
240    movq %rcx, %rax
241    vmcall
242
243	.global _save_restore_debug_regs_entry
244_save_restore_debug_regs_entry:
245
246    pushq %rax
247    xor %rcx, %rcx
248
249    /*
250     * For all registers to test, each of these blocks:
251     * 1. increments rcx (to keep track in case of test failure),
252     * 2. checks the register's value against a (constant) template
253     * 3. flips all bits for the VMM to later verify that the changes value is available.
254     *
255     * For a second pass, bits are all flipped back to their original state after
256     * the vmcall.
257     */
258
259    incq %rcx
260    movq $0x1111111111111111, %rbx
261    movq %dr0, %rax
262    cmpq %rbx, %rax
263    jne .foul
264    notq %rbx
265    movq %rbx, %dr0
266
267    movq $0xEEEEEEEEEEEEEEEE, %rbx
268    movq %dr0, %rax
269    cmpq %rbx, %rax
270    jne .foul
271
272    incq %rcx
273    movq $0x2222222222222222, %rbx
274    movq %dr1, %rax
275    cmpq %rbx, %rax
276    jne .foul
277    notq %rbx
278    movq %rbx, %dr1
279
280    incq %rcx
281    movq $0x3333333333333333, %rbx
282    movq %dr2, %rax
283    cmpq %rbx, %rax
284    jne .foul
285    notq %rbx
286    movq %rbx, %dr2
287
288    incq %rcx
289    movq $0x4444444444444444, %rbx
290    movq %dr3, %rax
291    cmpq %rbx, %rax
292    jne .foul
293    notq %rbx
294    movq %rbx, %dr3
295
296    /*
297     * flip only defined bits for debug status and control registers
298     * (and also don't flip General Detect Enable, as the next access
299     * to any debug register would generate an exception)
300     */
301
302    incq %rcx
303    movq $0x5555555555555555, %rbx
304    mov $0xffff0ff0, %rax
305    orq %rax, %rbx
306    movq $0xffffefff, %rax
307    andq %rax, %rbx
308    movq %dr6, %rax
309    cmpq %rbx, %rax
310    jne .foul
311    notq %rbx
312    mov $0xffff0ff0, %rax
313    orq %rax, %rbx
314    movq $0xffffefff, %rax
315    andq %rax, %rbx
316    movq %rbx, %dr6
317
318    incq %rcx
319    movq $0x5555555555555555, %rbx
320    orq $0x400, %rbx
321    movq $0xffff0fff, %rax
322    andq %rax, %rbx
323    movq %dr7, %rax
324    cmpq %rbx, %rax
325    jne .foul
326    notq %rbx
327    orq $0x400, %rbx
328    movq $0xffff0fff, %rax
329    andq %rax, %rbx
330    movq %rbx, %dr7
331
332    popq %rax
333    notq %rax
334    vmcall
335
336    pushq %rax
337
338    inc %rcx
339    movq $0xEEEEEEEEEEEEEEEE, %rbx
340    movq %dr0, %rax
341    cmpq %rbx, %rax
342    jne .foul
343
344    movq %dr0, %rbx
345    notq %rbx
346    movq %rbx, %dr0
347
348    movq %dr1, %rbx
349    notq %rbx
350    movq %rbx, %dr1
351
352    movq %dr2, %rbx
353    notq %rbx
354    movq %rbx, %dr2
355
356    movq %dr3, %rbx
357    notq %rbx
358    movq %rbx, %dr3
359
360    movq %dr6, %rbx
361    notq %rbx
362    mov $0xffff0ff0, %rax
363    orq %rax, %rbx
364    movq $0xffffefff, %rax
365    andq %rax, %rbx
366    movq %rbx, %dr6
367
368    movq %dr7, %rbx
369    notq %rbx
370    orq $0x400, %rbx
371    movq $0xffff0fff, %rax
372    andq %rax, %rbx
373    movq %rbx, %dr7
374
375    popq %rax
376
377    jmp _save_restore_debug_regs_entry // 2nd pass
378
.code32

	// Minimal 32-bit protected-mode guest: report a magic value and exit.
	.global _simple_protected_mode_vcpu_entry
_simple_protected_mode_vcpu_entry:

    movl $0x23456, %eax
    vmcall

.code16

	// Minimal real-mode guest: report a magic value and exit.
	.global _simple_real_mode_vcpu_entry
_simple_real_mode_vcpu_entry:

    movl $0x23456, %eax
    vmcall

.code32

	// rdar://61961809: start in 32-bit mode, arm long mode (EFER.LME),
	// then enable paging and far-jump through a pointer table at (%edi).
	.global _radar61961809_entry
_radar61961809_entry:

	mov		$0x99999999, %ebx	// sentinel address, see _radar61961809_loop64

	mov		$0xc0000080,%ecx	// IA32_EFER
	rdmsr
	or		$0x100,%eax			// .LME
	wrmsr

	vmcall

	mov		%cr0,%ecx
	or		$0x80000000,%ecx	// CR0.PG
	mov		%ecx,%cr0			// paging on => long mode becomes active

	// first (%edi) 6 bytes are _radar61961809_prepare far ptr
	ljmp	*(%edi)

.code32

	.global _radar61961809_prepare
_radar61961809_prepare:

	/*
	 * We switched into long mode, now immediately out, and the test
	 * will switch back in.
	 *
	 * This is done to suppress (legitimate) EPT and Page Fault exits.
	 * Until CR0.PG is enabled (which is what effectively activates
	 * long mode), the page tables are never looked at. Right after
	 * setting PG, that changes immediately, effecting transparently
	 * handled EPT violations. Additionally, the far jump that
	 * would be necessary to switch into a 64bit code segment would
	 * also cause EPT violations and PFs when fetching the segment
	 * descriptor from the GDT.
	 *
	 * By first jumping into a 32bit code segment after enabling PG
	 * once, we "warm up" both EPT and (harness managed) page tables,
	 * so the next exit after the far jump will most likely be an
	 * IRQ exit, most faithfully reproducing the problem.
	 */

	mov		%cr0,%ecx
	and		$~0x80000000,%ecx	// clear CR0.PG again
	mov		%ecx,%cr0

	mov		$0x1111, %eax
	vmcall

	// This is where the actual test really starts.
	mov		%cr0,%ecx
	or		$0x80000000,%ecx
	mov		%ecx,%cr0	// enable PG => long mode

	xor		%ecx, %ecx

	// second far pointer in the table at (%edi)
	add		$8,%edi
	ljmp	*(%edi)		// _radar61961809_loop64

.code64

	// Spin in 64-bit mode; if the guest were mis-resumed as 16-bit code,
	// the first instruction would fault (see comment below).
	.global _radar61961809_loop64
_radar61961809_loop64:
1:
	// as 16bit code, this instruction will be:
	//   add %al,(%bx,%si)
	// and cause an obvious EPT violation (%bx is 0x9999)
	mov		$0x1,%ebp

	// loop long enough for a good chance to an IRQ exit
	dec		%ecx
	jnz		1b

	// if we reach here, we stayed in long mode.
	mov		$0x2222, %eax
	vmcall

475	.global _radar60691363_entry
476_radar60691363_entry:
477	movq $0x800, %rsi // VMCS_GUEST_ES
478	vmreadq %rsi, %rax
479	vmcall
480	movq $0x6400, %rsi // VMCS_RO_EXIT_QUALIFIC
481	vmreadq %rsi, %rax
482	vmcall
483	movq $0x6402, %rsi // VMCS_RO_IO_RCX
484	vmreadq %rsi, %rax
485	vmcall
486
487	movq $0x800, %rsi // VMCS_GUEST_ES
488	movq $0x9191, %rax
489	vmwriteq %rax, %rsi
490	movq $0x6400, %rsi // VMCS_RO_EXIT_QUALIFIC
491	movq $0x9898, %rax
492	vmwriteq %rax, %rsi
493	movq $0x6402, %rsi // VMCS_RO_IO_RCX
494	movq $0x7979, %rax
495	vmwriteq %rax, %rsi
496
497	movq $0x4567, %rax
498
499	vmcall
500
.code16

	// Perform a fixed number of port I/Os with various arguments:
	// one 32-bit out to 0xab, then 3x/10x/10x byte outs to 0xab/0xcd/0xef.
	.global _pio_entry
_pio_entry:

	movl	$0xaa, %eax

	outl	%eax, $0xab

	movl	$3, %ecx
1:	outb	%al, $0xab
	loop	1b

	movl	$10, %ecx
1:	outb	%al, $0xcd
	loop	1b

	movl	$10, %ecx
1:	outb	%al, $0xef
	loop	1b

	movl	$0x23456, %eax
	vmcall

.code16
	// Perform 10 port I/Os on 0xef.
	.global _pio_entry_basic
_pio_entry_basic:

	movl	$10, %ecx
1:	outb	%al, $0xef
	loop	1b

	movl	$0x23456, %eax	// completion magic, then exit to the VMM
	vmcall

538	.global _hvtest_end
539_hvtest_end:
540