xref: /kvm-unit-tests/x86/eventinj.c (revision 7fd49c4a8871a6424e3da4e4260a3cfbf7a9441b)
#include "libcflat.h"
#include "vm.h"
#include "processor.h"
#include "desc.h"
#include "isr.h"
#include "apic.h"
#include "apic-defs.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "delay.h"

#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

void do_pf_tss(void);

static void apic_self_ipi(u8 v)
{
	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
		       APIC_INT_ASSERT | v, 0);
}

static void apic_self_nmi(void)
{
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
}

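/*
 * flush_phys_addr()/flush_stack(): the write to I/O port 0xe4 is handled by
 * the host-side test device, which (as used throughout this test) is expected
 * to drop and re-create the host mapping of the page containing the given
 * physical address.  The guest's next access to that page, for example the
 * IDT or the stack during event delivery, then has to be faulted back in by
 * the hypervisor, which is exactly the window this test wants to exercise.
 */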
#define flush_phys_addr(__s) outl(__s, 0xe4)
#define flush_stack() do {						\
		int __l;						\
		flush_phys_addr(virt_to_phys(&__l));			\
	} while (0)

extern char isr_iret_ip[];

static void flush_idt_page(void)
{
	struct descriptor_table_ptr ptr;
	sidt(&ptr);
	flush_phys_addr(virt_to_phys((void*)ptr.base));
}

static volatile unsigned int test_divider;
static volatile int test_count;

ulong stack_phys;
void *stack_va;

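/*
 * #PF handler body for the "serial fault" tests below: the faulting stack
 * page was installed read-only (or flushed), so event delivery itself takes a
 * page fault.  The handler re-installs a present and writable PTE for the
 * alternate stack and returns, letting the original event delivery be retried.
 */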
void do_pf_tss(void)
{
	printf("PF running\n");
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK | PT_WRITABLE_MASK, 0);
	invlpg(stack_va);
}

extern void pf_tss(void);

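/*
 * Entry point installed via set_intr_alt_stack(14, pf_tss) for the serial
 * fault tests.  On 32-bit this is expected to run as a hardware task: after
 * the iret the task's saved EIP points at the following jmp, which sends the
 * next task switch back to the top.  On x86_64 there is no task switching,
 * so the stub behaves like an ordinary handler and manually preserves the
 * caller-saved registers around the call into C.
 */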
asm ("pf_tss: \n\t"
#ifdef __x86_64__
        // no task on x86_64, save/restore caller-save regs
        "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
        "push %r8; push %r9; push %r10; push %r11\n"
#endif
        "call do_pf_tss \n\t"
#ifdef __x86_64__
        "pop %r11; pop %r10; pop %r9; pop %r8\n"
        "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
        "add $"S", %"R "sp\n\t"	// discard error code
        "iret"W" \n\t"
        "jmp pf_tss\n\t"
    );


#ifndef __x86_64__
static void of_isr(struct ex_regs *r)
{
	printf("OF isr running\n");
	test_count++;
}
#endif

static void np_isr(struct ex_regs *r)
{
	printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
	set_idt_sel(33, read_cs());
	test_count++;
}

static void de_isr(struct ex_regs *r)
{
	printf("DE isr running divider is %d\n", test_divider);
	test_divider = 10;
}

static void bp_isr(struct ex_regs *r)
{
	printf("BP isr running\n");
	test_count++;
}

static void nested_nmi_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip != (ulong)&isr_iret_ip)
		test_count++;
}

static void nmi_isr(struct ex_regs *r)
{
	printf("NMI isr running %p\n", &isr_iret_ip);
	test_count++;
	handle_exception(2, nested_nmi_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	io_delay();
	printf("After nested NMI to self\n");
}

unsigned long *iret_stack;

static void nested_nmi_iret_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip == iret_stack[-3])
		test_count++;
}

extern void do_iret(ulong phys_stack, void *virt_stack);

// An iret to the same privilege level doesn't pop SS or SP, so keep the old
// stack pointer in RDX while we run on the nested stack.
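//
// do_iret(phys_stack, virt_stack) builds an iret frame (flags, CS, return
// address) on the caller-supplied stack, flushes the physical page backing
// that stack via port 0xe4 right before the iret, and then executes the iret,
// so the iret itself has to be faulted back in by the hypervisor.  In
// nmi_iret_isr() a second NMI is already pending (blocked until this iret),
// and nested_nmi_iret_isr() checks that it is delivered exactly at the iret's
// return address.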

asm("do_iret:"
#ifdef __x86_64__
	"mov %rdi, %rax \n\t"		// phys_stack
	"mov %rsi, %rdx \n\t"		// virt_stack
#else
	"mov 4(%esp), %eax \n\t"	// phys_stack
	"mov 8(%esp), %edx \n\t"	// virt_stack
#endif
	"xchg %"R "dx, %"R "sp \n\t"	// point to new stack
	"pushf"W" \n\t"
	"mov %cs, %ecx \n\t"
	"push"W" %"R "cx \n\t"
	"push"W" $1f \n\t"
	"outl %eax, $0xe4 \n\t"		// flush page
	"iret"W" \n\t"
	"1: xchg %"R "dx, %"R "sp \n\t"	// point to old stack
	"ret\n\t"
   );

static void nmi_iret_isr(struct ex_regs *r)
{
	unsigned long *s = alloc_page();
	test_count++;
	printf("NMI isr running stack %p\n", s);
	handle_exception(2, nested_nmi_iret_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	printf("After nested NMI to self\n");
	iret_stack = &s[128];
	do_iret(virt_to_phys(s), iret_stack);
	printf("After iret\n");
}

static void tirq0(isr_regs_t *r)
{
	printf("irq0 running\n");
	if (test_count == 1)
		test_count++;
	eoi();
}

static void tirq1(isr_regs_t *r)
{
	printf("irq1 running\n");
	test_count++;
	eoi();
}

ulong saved_stack;

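/*
 * Temporarily point the stack at a test-controlled page (typically one that
 * has just been made read-only or flushed), so that the CPU's own pushes
 * during event delivery hit that page, then switch back afterwards.
 */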
#define switch_stack(S) do {						\
		asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack));	\
		asm volatile ("mov %0, %%" R "sp"::"r"(S));		\
	} while(0)

#define restore_stack() do {						\
		asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack));	\
	} while(0)

int main(void)
{
	unsigned int res;
	ulong *pt, *cr3, i;

	setup_vm();
	setup_idt();
	setup_alt_stack();

	handle_irq(32, tirq0);
	handle_irq(33, tirq1);

	/* generate HW exception that will fault on IDT and stack */
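	/* test_divider starts at 0 so the div faults; delivering #DE then hits
	   the flushed IDT and stack pages, so the hypervisor has to resolve
	   those faults and re-inject the exception.  de_isr sets the divider
	   to 10, so the re-executed div yields 1500/10 = 150 */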
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	printf("Result is %d\n", res);
	report("DE exception", res == 150);

	/* generate soft exception (BP) that will fault on IDT and stack */
	test_count = 0;
	handle_exception(3, bp_isr);
	printf("Try int 3\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("int $3");
	printf("After int 3\n");
	report("BP exception", test_count == 1);

#ifndef __x86_64__
	/* generate soft exception (OF) that will fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\ninto"::"a"(127));
	printf("After into\n");
	report("OF exception", test_count == 1);

	/* generate soft exception (OF) using a two byte instruction that will
	   fault on IDT */
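	/* "addr16 into" encodes to two bytes instead of one; the point seems
	   to be that re-injecting the software exception must account for the
	   full instruction length so the saved return address still lands
	   after the whole instruction */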
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\naddr16 into"::"a"(127));
	printf("After into\n");
	report("2 byte OF exception", test_count == 1);
#endif

	/* generate HW interrupt that will fault on IDT */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 33 to self\n");
	irq_enable();
	apic_self_ipi(33);
	io_delay();
	irq_disable();
	printf("After vec 33 to self\n");
	report("vec 33", test_count == 1);

	/* generate soft interrupt that will fault on IDT and stack */
	test_count = 0;
	flush_idt_page();
	printf("Try int $33\n");
	flush_stack();
	asm volatile ("int $33");
	printf("After int $33\n");
	report("int $33", test_count == 1);

	/* Inject two HW interrupts, then open an interrupt window.  Both
	   interrupts will fault on IDT access */
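	/* Both IPIs are sent with interrupts disabled, so both are pending
	   when the window opens; vector 33 has the higher priority and is
	   expected to be delivered first, which is why tirq0 only counts when
	   tirq1 has already run */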
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and 33 to self\n");
	apic_self_ipi(32);
	apic_self_ipi(33);
	io_delay();
	irq_enable();
	asm volatile("nop");
	irq_disable();
	printf("After vec 32 and 33 to self\n");
	report("vec 32/33", test_count == 2);


	/* Inject a HW interrupt, do sti and then (while in the interrupt
	   shadow) inject a soft interrupt.  Fault during the soft interrupt.
	   The soft interrupt should be handled before the HW interrupt */
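	/* The sti shadow keeps the pending vector 32 blocked for one more
	   instruction, so the int $33 is taken first; tirq1 bumps test_count
	   to 1 and only then does tirq0 see test_count == 1 and bump it to 2 */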
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and int $33\n");
	apic_self_ipi(32);
	flush_stack();
	io_delay();
	asm volatile ("sti; int $33");
	irq_disable();
	printf("After vec 32 and int $33\n");
	report("vec 32/int $33", test_count == 2);

	/* test that TPR is honored */
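	/* TPR 0xf0 masks both pending vectors (priority classes 2 and 3);
	   lowering it to 0x20 unmasks vector 62 (class 3) but keeps vector 32
	   (class 2) masked, so only tirq1 runs and test_count must be 1.
	   Clearing TPR afterwards lets vector 32 through as well */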
	test_count = 0;
	handle_irq(62, tirq1);
	flush_idt_page();
	printf("Sending vec 32 and 62 and mask one with TPR\n");
	apic_write(APIC_TASKPRI, 0xf << 4);
	irq_enable();
	apic_self_ipi(32);
	apic_self_ipi(62);
	io_delay();
	apic_write(APIC_TASKPRI, 0x2 << 4);
	printf("After 32/62 TPR test\n");
	report("TPR", test_count == 1);
	apic_write(APIC_TASKPRI, 0x0);
	while(test_count != 2); /* wait for second irq */
	irq_disable();

	/* test fault during NP delivery */
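	/* vector 33's IDT entry is pointed at a not-present selector, so the
	   int $33 below takes #NP during delivery; np_isr repairs the selector
	   and returns, the int $33 is retried, tirq1 finally runs and
	   test_count ends up at 2 */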
	printf("Before NP test\n");
	test_count = 0;
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	flush_idt_page();
	flush_stack();
	asm volatile ("int $33");
	printf("After int33\n");
	report("NP exception", test_count == 2);

	/* generate NMI that will fault on IDT */
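	/* nmi_isr sends itself a second NMI; that one stays blocked until the
	   first handler's iret, and nested_nmi_isr records where it was
	   actually delivered */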
	test_count = 0;
	handle_exception(2, nmi_isr);
	flush_idt_page();
	printf("Sending NMI to self\n");
	apic_self_nmi();
	printf("After NMI to self\n");
	/* this is needed on VMX without NMI window notification.
	   The interrupt window is used instead, so let the pending NMI
	   be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	report("NMI", test_count == 2);

	/* generate NMI that will fault on IRET */
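	/* nmi_iret_isr sends itself a second NMI (blocked until iret) and then
	   runs do_iret() on a freshly flushed stack page, so the iret that
	   unblocks NMIs has to be faulted back in; nested_nmi_iret_isr checks
	   that the nested NMI lands exactly on the iret's return address */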
	printf("Before NMI IRET test\n");
	test_count = 0;
	handle_exception(2, nmi_iret_isr);
	printf("Sending NMI to self\n");
	apic_self_nmi();
	/* this is needed on VMX without NMI window notification.
	   The interrupt window is used instead, so let the pending NMI
	   be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	printf("After NMI to self\n");
	report("NMI", test_count == 2);

	stack_phys = (ulong)virt_to_phys(alloc_page());
	stack_va = alloc_vpage();

	/* Generate DE and PF exceptions serially */
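	/* The alternate stack is mapped read-only and the IDT page is flushed,
	   so delivering #DE triggers #PF; pf_tss makes the stack writable
	   again and returns, #DE delivery is retried, and de_isr sets the
	   divider so the re-executed div yields 150 */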
	test_divider = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	flush_idt_page();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	restore_stack();
	printf("Result is %d\n", res);
	report("DE PF exceptions", res == 150);

	/* Generate NP and PF exceptions serially */
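	/* Same idea with a software interrupt: int $33 hits the not-present
	   selector and raises #NP, and pushing the #NP frame onto the
	   read-only stack raises #PF first; pf_tss fixes the stack, np_isr
	   fixes the selector, and the retried int $33 finally reaches tirq1 */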
	printf("Before NP test\n");
	test_count = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_idt_page();
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	asm volatile ("int $33");
	restore_stack();
	printf("After int33\n");
	report("NP PF exceptions", test_count == 2);

	pt = alloc_page();
	cr3 = (void*)read_cr3();
	/* use shadowed stack during interrupt delivery */
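	/* Make pt serve both as a new paging structure and as the page it
	   ultimately maps; with a shadowing MMU the write through the new
	   virtual address is expected to get pt write-protected as a guest
	   page table, and the int $33 below then pushes its frame onto that
	   same page */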
	for (i = 0; i < 4096/sizeof(ulong); i++) {
		if (!cr3[i]) {
			cr3[i] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
			pt[0] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
#ifndef __x86_64__
			((ulong*)(i<<22))[1] = 0;
#else
			((ulong*)(i<<39))[1] = 0;
#endif
			write_cr3(virt_to_phys(cr3));
			break;
		}
	}
	test_count = 0;
	printf("Try int 33 with shadowed stack\n");
	switch_stack(((char*)pt) + 4095);
	asm volatile("int $33");
	restore_stack();
	printf("After int 33 with shadowed stack\n");
	report("int 33 with shadowed stack", test_count == 1);

	return report_summary();
}