xref: /kvm-unit-tests/x86/eventinj.c (revision b006d7eb9c64ed1046041c4eb3c4077be11d8a3d)
#include "libcflat.h"
#include "processor.h"
#include "vm.h"
#include "desc.h"
#include "isr.h"
#include "apic.h"
#include "apic-defs.h"

#ifdef __x86_64__
#  define R "r"
#else
#  define R "e"
#endif

static inline void io_delay(void)
{
}

static inline void outl(int addr, int val)
{
        asm volatile ("outl %1, %w0" : : "d" (addr), "a" (val));
}

void apic_self_ipi(u8 v)
{
	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
		       APIC_INT_ASSERT | v, 0);
}

void apic_self_nmi(void)
{
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
}

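/*
 * Descriptive note (assumption): port 0xe4 is a test-device hook.  Writing a
 * guest-physical address to it is expected to drop the host-side mapping of
 * that page, so the next guest access to it (e.g. during event delivery)
 * forces a fault that the hypervisor must handle while injecting the event.
 */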
#define flush_phys_addr(__s) outl(0xe4, __s)
#define flush_stack() do {						\
		int __l;						\
		flush_phys_addr(virt_to_phys(&__l));			\
	} while (0)

extern char isr_iret_ip[];

static void flush_idt_page(void)
{
	struct descriptor_table_ptr ptr;
	sidt(&ptr);
	flush_phys_addr(virt_to_phys((void*)ptr.base));
}

static volatile unsigned int test_divider;
static volatile int test_count;

ulong stack_phys;
void *stack_va;

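/*
 * #PF handler body: re-install a present+writable PTE for the test stack and
 * flush the stale TLB entry, so the event delivery that faulted on the
 * read-only stack page can be restarted and complete.
 */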
void do_pf_tss(void)
{
	printf("PF running\n");
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT | PTE_WRITE, 0);
	invlpg(stack_va);
}

extern void pf_tss(void);

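/*
 * #PF entry point installed via set_intr_alt_stack(): call the C handler,
 * discard the error code the CPU pushed, and iret.  The trailing jmp matters
 * on 32-bit, where the handler runs as a hardware task (see the comment
 * below): a re-entered task resumes after its iret, so jump back to the top
 * for the next fault.
 */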
asm ("pf_tss: \n\t"
#ifdef __x86_64__
        // no task on x86_64, save/restore caller-save regs
        "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
        "push %r8; push %r9; push %r10; push %r11\n"
#endif
        "call do_pf_tss \n\t"
#ifdef __x86_64__
        "pop %r11; pop %r10; pop %r9; pop %r8\n"
        "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
        "add $"S", %"R "sp\n\t"	// discard error code
        "iret"W" \n\t"
        "jmp pf_tss\n\t"
    );


#ifndef __x86_64__
static void of_isr(struct ex_regs *r)
{
	printf("OF isr running\n");
	test_count++;
}
#endif

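/*
 * #NP handler: vector 33's IDT entry was pointed at a not-present selector,
 * so delivering int $33 raises #NP.  Restore a valid selector so the retried
 * int $33 succeeds and reaches tirq1 (the tests therefore expect two counts).
 */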
static void np_isr(struct ex_regs *r)
{
	printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
	set_idt_sel(33, read_cs());
	test_count++;
}

static void de_isr(struct ex_regs *r)
{
	printf("DE isr running divider is %d\n", test_divider);
	test_divider = 10;
}

static void bp_isr(struct ex_regs *r)
{
	printf("BP isr running\n");
	test_count++;
}

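/*
 * The nested NMI is pended while the first handler runs; count it only if it
 * is delivered with RIP somewhere other than the handler's iret instruction
 * (isr_iret_ip), i.e. not injected at the wrong point.
 */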
static void nested_nmi_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip != (ulong)&isr_iret_ip)
		test_count++;
}

static void nmi_isr(struct ex_regs *r)
{
	printf("NMI isr running %p\n", &isr_iret_ip);
	test_count++;
	handle_exception(2, nested_nmi_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	io_delay();
	printf("After nested NMI to self\n");
}

unsigned long *iret_stack;

static void nested_nmi_iret_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip == iret_stack[-3])
		test_count++;
}

extern void do_iret(ulong phys_stack, void *virt_stack);

// An iret to the same privilege level doesn't pop SS:SP, so save the old
// stack pointer in (R|E)DX while we run on the nested stack.

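/*
 * do_iret(phys_stack, virt_stack): switch onto the supplied stack, push an
 * iret frame returning to local label 1, flush the physical page backing
 * that stack (port 0xe4), and iret so the return itself faults on the stack
 * access.
 */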
asm("do_iret:"
#ifdef __x86_64__
	"mov %rdi, %rax \n\t"		// phys_stack
	"mov %rsi, %rdx \n\t"		// virt_stack
#else
	"mov 4(%esp), %eax \n\t"	// phys_stack
	"mov 8(%esp), %edx \n\t"	// virt_stack
#endif
	"xchg %"R "dx, %"R "sp \n\t"	// point to new stack
	"pushf"W" \n\t"
	"mov %cs, %ecx \n\t"
	"push"W" %"R "cx \n\t"
	"push"W" $1f \n\t"
	"outl %eax, $0xe4 \n\t"		// flush page
	"iret"W" \n\t"
	"1: xchg %"R "dx, %"R "sp \n\t"	// point to old stack
	"ret\n\t"
   );

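/*
 * NMI-on-IRET test handler: queue a nested NMI while NMIs are still blocked,
 * then run do_iret() on a freshly allocated stack whose page gets flushed.
 * The nested NMI should only arrive once that iret has completed, i.e. with
 * RIP at the iret target saved in iret_stack[-3] (checked above).
 */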
static void nmi_iret_isr(struct ex_regs *r)
{
	unsigned long *s = alloc_page();
	test_count++;
	printf("NMI isr running stack %p\n", s);
	handle_exception(2, nested_nmi_iret_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	printf("After nested NMI to self\n");
	iret_stack = &s[128];
	do_iret(virt_to_phys(s), iret_stack);
	printf("After iret\n");
}

static void tirq0(isr_regs_t *r)
{
	printf("irq0 running\n");
	if (test_count != 0)
		test_count++;
	eoi();
}

static void tirq1(isr_regs_t *r)
{
	printf("irq1 running\n");
	test_count++;
	eoi();
}

ulong saved_stack;

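/*
 * Temporarily run on a caller-supplied stack: switch_stack() saves the
 * current stack pointer and loads S; restore_stack() switches back.  The
 * code in between must not return before restore_stack() runs.
 */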
#define switch_stack(S) do {						\
		asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack));	\
		asm volatile ("mov %0, %%" R "sp"::"r"(S));		\
	} while(0)

#define restore_stack() do {						\
		asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack));	\
	} while(0)

int main()
{
	unsigned int res;
	ulong *pt, *cr3, i;

	setup_vm();
	setup_idt();
	setup_alt_stack();

	handle_irq(32, tirq0);
	handle_irq(33, tirq1);

	/* generate HW exception that will fault on IDT and stack */
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	printf("Result is %d\n", res);
	report("DE exception", res == 150);

	/* generate soft exception (BP) that will fault on IDT and stack */
	test_count = 0;
	handle_exception(3, bp_isr);
	printf("Try int 3\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("int $3");
	printf("After int 3\n");
	report("BP exception", test_count == 1);

#ifndef __x86_64__
	/* generate soft exception (OF) that will fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\ninto"::"a"(127));
	printf("After into\n");
	report("OF exception", test_count == 1);

	/* generate soft exception (OF) using a two-byte instruction that will
	   fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\naddr16 into"::"a"(127));
	printf("After into\n");
	report("2 byte OF exception", test_count == 1);
#endif

	/* generate HW interrupt that will fault on IDT */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 33 to self\n");
	irq_enable();
	apic_self_ipi(33);
	io_delay();
	irq_disable();
	printf("After vec 33 to self\n");
	report("vec 33", test_count == 1);

	/* generate soft interrupt that will fault on IDT and stack */
	test_count = 0;
	flush_idt_page();
	printf("Try int $33\n");
	flush_stack();
	asm volatile ("int $33");
	printf("After int $33\n");
	report("int $33", test_count == 1);

	/* Inject two HW interrupts, then open the interrupt window. Both
	   interrupts will fault on IDT access */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and 33 to self\n");
	apic_self_ipi(32);
	apic_self_ipi(33);
	io_delay();
	irq_enable();
	asm volatile("nop");
	irq_disable();
	printf("After vec 32 and 33 to self\n");
	report("vec 32/33", test_count == 2);

	/* Inject a HW interrupt, do sti and then (while in the interrupt
	   shadow) inject a soft interrupt. Fault during soft interrupt
	   delivery. The soft interrupt should be handled before the HW
	   interrupt */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and int $33\n");
	apic_self_ipi(32);
	flush_stack();
	io_delay();
	irq_enable();
	asm volatile ("int $33");
	irq_disable();
	printf("After vec 32 and int $33\n");
	report("vec 32/int $33", test_count == 2);

	/* test that TPR is honored */
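	/* Vector 62 is priority class 3 and vector 32 is class 2: with TPR at
	   0xf0 both stay pending; dropping TPR to 0x20 lets only vec 62
	   (class 3 > TPR class 2) through, so exactly one handler should have
	   run before TPR is finally cleared to release vec 32. */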
	test_count = 0;
	handle_irq(62, tirq1);
	flush_idt_page();
	printf("Sending vec 32 and 62 and mask one with TPR\n");
	apic_write(APIC_TASKPRI, 0xf << 4);
	irq_enable();
	apic_self_ipi(32);
	apic_self_ipi(62);
	io_delay();
	apic_write(APIC_TASKPRI, 0x2 << 4);
	printf("After 32/62 TPR test\n");
	report("TPR", test_count == 1);
	apic_write(APIC_TASKPRI, 0x0);
	while(test_count != 2); /* wait for second irq */
	irq_disable();

	/* test fault during NP delivery */
	printf("Before NP test\n");
	test_count = 0;
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	flush_idt_page();
	flush_stack();
	asm volatile ("int $33");
	printf("After int33\n");
	report("NP exception", test_count == 2);

	/* generate NMI that will fault on IDT */
	test_count = 0;
	handle_exception(2, nmi_isr);
	flush_idt_page();
	printf("Sending NMI to self\n");
	apic_self_nmi();
	printf("After NMI to self\n");
	/* This is needed on VMX without NMI window notification.
	   The interrupt window is used instead, so open it to let the
	   pending NMI be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	report("NMI", test_count == 2);

	/* generate NMI that will fault on IRET */
	printf("Before NMI IRET test\n");
	test_count = 0;
	handle_exception(2, nmi_iret_isr);
	printf("Sending NMI to self\n");
	apic_self_nmi();
	/* This is needed on VMX without NMI window notification.
	   The interrupt window is used instead, so open it to let the
	   pending NMI be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	printf("After NMI to self\n");
	report("NMI", test_count == 2);

	stack_phys = (ulong)virt_to_phys(alloc_page());
	stack_va = alloc_vpage();

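	/* The test stack below is mapped read-only and its physical page is
	   flushed, so delivering #DE (or #NP) faults on the stack push;
	   pf_tss then makes the page writable and the delivery is
	   restarted. */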
	/* Generate DE and PF exceptions serially */
	test_divider = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT, 0);
	invlpg(stack_va);
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	flush_idt_page();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	restore_stack();
	printf("Result is %d\n", res);
	report("DE PF exceptions", res == 150);

	/* Generate NP and PF exceptions serially */
	printf("Before NP test\n");
	test_count = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT, 0);
	invlpg(stack_va);
	flush_idt_page();
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	asm volatile ("int $33");
	restore_stack();
	printf("After int33\n");
	report("NP PF exceptions", test_count == 2);

	pt = alloc_page();
	cr3 = (void*)read_cr3();
	memset(pt, 0, 4096);
	/* use shadowed stack during interrupt delivery */
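	/* Point a free top-level paging entry at pt and map pt through itself,
	   so pt is simultaneously ordinary memory and a live guest page table.
	   Under shadow paging KVM then tracks (write-protects) pt, and using
	   it as the stack makes the int $33 frame pushes hit a shadowed
	   page-table page (assumption: this is the shadow-MMU case the test's
	   name refers to). */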
	for (i = 0; i < 4096/sizeof(ulong); i++) {
		if (!cr3[i]) {
			cr3[i] = virt_to_phys(pt) | PTE_PRESENT | PTE_WRITE;
			pt[0] = virt_to_phys(pt) | PTE_PRESENT | PTE_WRITE;
#ifndef __x86_64__
			((ulong*)(i<<22))[1] = 0;
#else
			((ulong*)(i<<39))[1] = 0;
#endif
			write_cr3(virt_to_phys(cr3));
			break;
		}
	}
	test_count = 0;
	printf("Try int 33 with shadowed stack\n");
	switch_stack(((char*)pt) + 4095);
	asm volatile("int $33");
	restore_stack();
	printf("After int 33 with shadowed stack\n");
	report("int 33 with shadowed stack", test_count == 1);

	return report_summary();
}