/* kvm-unit-tests: x86/eventinj.c (revision 06846df56370af39fb4c9cfd71c032197133929b) */
#include "libcflat.h"
#include "vm.h"
#include "processor.h"
#include "desc.h"
#include "isr.h"
#include "apic.h"
#include "apic-defs.h"
#include "vmalloc.h"
#include "alloc_page.h"

#ifdef __x86_64__
#  define R "r"
#else
#  define R "e"
#endif

void do_pf_tss(void);

static inline void io_delay(void)
{
}

static void apic_self_ipi(u8 v)
{
	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
		       APIC_INT_ASSERT | v, 0);
}

static void apic_self_nmi(void)
{
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
}

#define flush_phys_addr(__s) outl(__s, 0xe4)
#define flush_stack() do {						\
		int __l;						\
		flush_phys_addr(virt_to_phys(&__l));			\
	} while (0)
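
/*
 * flush_phys_addr() writes a guest-physical address to I/O port 0xe4.  In
 * this test setup the port write is expected to be intercepted by the host
 * side of the harness, which drops its translation (e.g. the shadow/EPT
 * mapping) for that page, so the guest's next access to it faults while an
 * event is being delivered.  flush_stack() applies this to the page backing
 * the current stack.
 */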

extern char isr_iret_ip[];

static void flush_idt_page(void)
{
	struct descriptor_table_ptr ptr;
	sidt(&ptr);
	flush_phys_addr(virt_to_phys((void*)ptr.base));
}

static volatile unsigned int test_divider;
static volatile int test_count;

ulong stack_phys;
void *stack_va;
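
/*
 * #PF handler body: re-install a present+writable PTE for stack_va so that
 * the stack push which faulted during event delivery can succeed once the
 * faulting delivery is retried.
 */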
void do_pf_tss(void)
{
	printf("PF running\n");
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK | PT_WRITABLE_MASK, 0);
	invlpg(stack_va);
}

extern void pf_tss(void);

asm ("pf_tss: \n\t"
#ifdef __x86_64__
        // no task switch on x86_64; save/restore caller-saved regs
        "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
        "push %r8; push %r9; push %r10; push %r11\n"
#endif
        "call do_pf_tss \n\t"
#ifdef __x86_64__
        "pop %r11; pop %r10; pop %r9; pop %r8\n"
        "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
        "add $"S", %"R "sp\n\t"	// discard error code
        "iret"W" \n\t"
        "jmp pf_tss\n\t"
    );
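
/*
 * #PF entry stub registered with set_intr_alt_stack(14, pf_tss).  The
 * "no task switch" note above refers to hardware task switching: on 32-bit
 * the handler is presumably entered through a task gate (hence the pf_tss
 * name and the trailing jmp, which lets the task be re-entered at the top
 * on the next fault), while on 64-bit it is an ordinary interrupt handler,
 * so caller-saved registers must be preserved around the call into C.
 * Either way the stub discards the error code before the iret.
 */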

#ifndef __x86_64__
static void of_isr(struct ex_regs *r)
{
	printf("OF isr running\n");
	test_count++;
}
#endif

static void np_isr(struct ex_regs *r)
{
	printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
	set_idt_sel(33, read_cs());
	test_count++;
}

static void de_isr(struct ex_regs *r)
{
	printf("DE isr running divider is %d\n", test_divider);
	test_divider = 10;
}

static void bp_isr(struct ex_regs *r)
{
	printf("BP isr running\n");
	test_count++;
}

static void nested_nmi_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip != (ulong)&isr_iret_ip)
		test_count++;
}

static void nmi_isr(struct ex_regs *r)
{
	printf("NMI isr running %p\n", &isr_iret_ip);
	test_count++;
	handle_exception(2, nested_nmi_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	io_delay();
	printf("After nested NMI to self\n");
}

unsigned long *iret_stack;

static void nested_nmi_iret_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip == iret_stack[-3])
		test_count++;
}

extern void do_iret(ulong phys_stack, void *virt_stack);

// A return to the same privilege level won't pop SS or SP, so
// keep the old stack pointer in RDX while we run on the nested stack

asm("do_iret:"
#ifdef __x86_64__
	"mov %rdi, %rax \n\t"		// phys_stack
	"mov %rsi, %rdx \n\t"		// virt_stack
#else
	"mov 4(%esp), %eax \n\t"	// phys_stack
	"mov 8(%esp), %edx \n\t"	// virt_stack
#endif
	"xchg %"R "dx, %"R "sp \n\t"	// point to new stack
	"pushf"W" \n\t"
	"mov %cs, %ecx \n\t"
	"push"W" %"R "cx \n\t"
	"push"W" $1f \n\t"
	"outl %eax, $0xe4 \n\t"		// flush page
	"iret"W" \n\t"
	"1: xchg %"R "dx, %"R "sp \n\t"	// point to old stack
	"ret\n\t"
   );
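
/*
 * do_iret(phys_stack, virt_stack) builds an iret frame (flags, cs, and a
 * local label) on virt_stack, asks the harness to flush phys_stack via
 * port 0xe4, and then executes iret.  The iret therefore has to read its
 * frame from a page the host presumably no longer maps, so the host must
 * handle a fault in the middle of completing the iret; the nested NMI that
 * is pending at that point should only be delivered once the iret has
 * finished, which is what nested_nmi_iret_isr checks by comparing rip
 * against the return address stored in the frame (iret_stack[-3]).
 */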

static void nmi_iret_isr(struct ex_regs *r)
{
	unsigned long *s = alloc_page();
	test_count++;
	printf("NMI isr running stack %p\n", s);
	handle_exception(2, nested_nmi_iret_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	printf("After nested NMI to self\n");
	iret_stack = &s[128];
	do_iret(virt_to_phys(s), iret_stack);
	printf("After iret\n");
}

static void tirq0(isr_regs_t *r)
{
	printf("irq0 running\n");
	if (test_count == 1)
		test_count++;
	eoi();
}

static void tirq1(isr_regs_t *r)
{
	printf("irq1 running\n");
	test_count++;
	eoi();
}

ulong saved_stack;

#define switch_stack(S) do {						\
		asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack));	\
		asm volatile ("mov %0, %%" R "sp"::"r"(S));		\
	} while (0)

#define restore_stack() do {						\
		asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack));	\
	} while (0)
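
/*
 * Temporarily run on a caller-supplied stack.  The old stack pointer is
 * kept in a single global, so these macros do not nest.
 */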

int main(void)
{
	unsigned int res;
	ulong *pt, *cr3, i;

	setup_vm();
	setup_idt();
	setup_alt_stack();

	handle_irq(32, tirq0);
	handle_irq(33, tirq1);

	/* generate HW exception that will fault on IDT and stack */
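	/*
	 * test_divider is still 0, so the div below raises #DE while both the
	 * IDT page and the stack page have been flushed via port 0xe4.
	 * de_isr sets the divider to 10; #DE is a fault, so the div is
	 * restarted and 1500/10 = 150 ends up in res.
	 */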
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	printf("Result is %d\n", res);
	report("DE exception", res == 150);

	/* generate soft exception (BP) that will fault on IDT and stack */
	test_count = 0;
	handle_exception(3, bp_isr);
	printf("Try int 3\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("int $3");
	printf("After int 3\n");
	report("BP exception", test_count == 1);

#ifndef __x86_64__
	/* generate soft exception (OF) that will fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\ninto"::"a"(127));
	printf("After into\n");
	report("OF exception", test_count == 1);

	/* generate soft exception (OF) using a two-byte instruction that will
	   fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\naddr16 into"::"a"(127));
	printf("After into\n");
	report("2 byte OF exception", test_count == 1);
#endif

	/* generate HW interrupt that will fault on IDT */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 33 to self\n");
	irq_enable();
	apic_self_ipi(33);
	io_delay();
	irq_disable();
	printf("After vec 33 to self\n");
	report("vec 33", test_count == 1);

	/* generate soft interrupt that will fault on IDT and stack */
	test_count = 0;
	flush_idt_page();
	printf("Try int $33\n");
	flush_stack();
	asm volatile ("int $33");
	printf("After int $33\n");
	report("int $33", test_count == 1);

	/* Inject two HW interrupts, then open an interrupt window.  Both
	   interrupts will fault on IDT access */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and 33 to self\n");
	apic_self_ipi(32);
	apic_self_ipi(33);
	io_delay();
	irq_enable();
	asm volatile("nop");
	irq_disable();
	printf("After vec 32 and 33 to self\n");
	report("vec 32/33", test_count == 2);

	/* Inject a HW interrupt, do sti and then (while in the interrupt
	   shadow) inject a soft interrupt.  Fault during the soft interrupt.
	   The soft interrupt should be handled before the HW interrupt */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and int $33\n");
	apic_self_ipi(32);
	flush_stack();
	io_delay();
	asm volatile ("sti; int $33");
	irq_disable();
	printf("After vec 32 and int $33\n");
	report("vec 32/int $33", test_count == 2);

	/* test that TPR is honored */
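	/*
	 * With TPR at 0xf0 both self-IPIs (vectors 32 and 62) stay pending.
	 * Lowering TPR to 0x20 lets vector 62 (priority class 3) through but
	 * still masks vector 32 (class 2), so only tirq1 should have run when
	 * the report is taken; clearing TPR afterwards releases vector 32.
	 */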
	test_count = 0;
	handle_irq(62, tirq1);
	flush_idt_page();
	printf("Sending vec 32 and 62 and mask one with TPR\n");
	apic_write(APIC_TASKPRI, 0xf << 4);
	irq_enable();
	apic_self_ipi(32);
	apic_self_ipi(62);
	io_delay();
	apic_write(APIC_TASKPRI, 0x2 << 4);
	printf("After 32/62 TPR test\n");
	report("TPR", test_count == 1);
	apic_write(APIC_TASKPRI, 0x0);
	while (test_count != 2); /* wait for second irq */
	irq_disable();

	/* test fault during NP delivery */
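	/*
	 * Vector 33's IDT entry is pointed at a not-present segment selector,
	 * so delivering int $33 raises #NP.  np_isr repairs the selector and
	 * bumps test_count; the int $33 delivery is then retried and lands in
	 * tirq1, so test_count should reach 2.
	 */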
	printf("Before NP test\n");
	test_count = 0;
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	flush_idt_page();
	flush_stack();
	asm volatile ("int $33");
	printf("After int33\n");
	report("NP exception", test_count == 2);

	/* generate NMI that will fault on IDT */
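	/*
	 * The first NMI is delivered while the IDT page has been flushed.
	 * Inside nmi_isr a second NMI is sent to self; it must stay pending
	 * until the first handler's iret, and nested_nmi_isr counts it only
	 * if it does not arrive at the handler's iret instruction itself
	 * (isr_iret_ip), so test_count should reach 2.
	 */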
	test_count = 0;
	handle_exception(2, nmi_isr);
	flush_idt_page();
	printf("Sending NMI to self\n");
	apic_self_nmi();
	printf("After NMI to self\n");
	/* this is needed on VMX without NMI window notification.
	   An interrupt window is used instead, so allow the pending NMI
	   to be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	report("NMI", test_count == 2);

	/* generate NMI that will fault on IRET */
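	/*
	 * Here the nested NMI stays pending while nmi_iret_isr runs; the
	 * handler then calls do_iret() on a freshly flushed stack page, so the
	 * pending NMI has to be delivered around an iret that itself faults on
	 * the host side.  nested_nmi_iret_isr checks that the NMI arrives at
	 * the iret's return address, i.e. only after the iret has completed.
	 */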
	printf("Before NMI IRET test\n");
	test_count = 0;
	handle_exception(2, nmi_iret_isr);
	printf("Sending NMI to self\n");
	apic_self_nmi();
	/* this is needed on VMX without NMI window notification.
	   An interrupt window is used instead, so allow the pending NMI
	   to be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	printf("After NMI to self\n");
	report("NMI", test_count == 2);

	stack_phys = (ulong)virt_to_phys(alloc_page());
	stack_va = alloc_vpage();

	/* Generate DE and PF exceptions serially */
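	/*
	 * The stack is switched to a page that is mapped read-only, so pushing
	 * the #DE exception frame page-faults.  The #PF handler (pf_tss, on an
	 * alternative stack) remaps the page writable, #DE delivery is then
	 * retried, de_isr fixes the divider and the div finally completes with
	 * 1500/10 = 150.
	 */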
	test_divider = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	flush_idt_page();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	restore_stack();
	printf("Result is %d\n", res);
	report("DE PF exceptions", res == 150);

	/* Generate NP and PF exceptions serially */
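	/*
	 * Same idea with a contributory exception during delivery: int $33
	 * hits the not-present selector (#NP), whose frame push page-faults on
	 * the read-only stack; pf_tss remaps the stack, np_isr then fixes the
	 * selector, and the retried int $33 finally reaches tirq1, so
	 * test_count should reach 2.
	 */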
	printf("Before NP test\n");
	test_count = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_idt_page();
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	asm volatile ("int $33");
	restore_stack();
	printf("After int33\n");
	report("NP PF exceptions", test_count == 2);

	pt = alloc_page();
	cr3 = (void*)read_cr3();
	memset(pt, 0, 4096);
	/* use shadowed stack during interrupt delivery */
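	/*
	 * Build a self-referential mapping: the first free top-level entry is
	 * pointed at pt, and pt maps itself, so pt now doubles as a live page
	 * table.  Interrupt delivery is then done with the stack placed inside
	 * that same page, which (under shadow paging) is presumably
	 * write-protected by the host MMU, forcing it to cope with a fault on
	 * the stack push while injecting the event.
	 */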
	for (i = 0; i < 4096/sizeof(ulong); i++) {
		if (!cr3[i]) {
			cr3[i] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
			pt[0] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
#ifndef __x86_64__
			((ulong*)(i<<22))[1] = 0;
#else
			((ulong*)(i<<39))[1] = 0;
#endif
			write_cr3(virt_to_phys(cr3));
			break;
		}
	}
	test_count = 0;
	printf("Try int 33 with shadowed stack\n");
	switch_stack(((char*)pt) + 4095);
	asm volatile("int $33");
	restore_stack();
	printf("After int 33 with shadowed stack\n");
	report("int 33 with shadowed stack", test_count == 1);

	return report_summary();
}