xref: /kvm-unit-tests/x86/eventinj.c (revision a322d4c597bb7a4de7985e7b51b80504f7e4fdda)
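/*
 * Event injection tests: deliver hardware exceptions, software interrupts,
 * external interrupts and NMIs while the IDT page and/or the stack page have
 * been flushed from the host (or made read-only / shadowed), so that the
 * delivery of the event itself faults and has to be retried.
 */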
#include "libcflat.h"
#include "processor.h"
#include "vm.h"
#include "desc.h"
#include "isr.h"
#include "apic.h"
#include "apic-defs.h"

/* register-name prefix for the native word size */
#ifdef __x86_64__
#  define R "r"
#else
#  define R "e"
#endif

static inline void io_delay(void)
{
}

static inline void outl(int addr, int val)
{
	asm volatile ("outl %1, %w0" : : "d" (addr), "a" (val));
}

void apic_self_ipi(u8 v)
{
	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
		       APIC_INT_ASSERT | v, 0);
}

void apic_self_nmi(void)
{
	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
}

static void eoi(void)
{
	apic_write(APIC_EOI, 0);
}

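/*
 * Port 0xe4 is the test device's "flush page" port (see the "flush page"
 * comment in do_iret below): writing a guest-physical address there makes the
 * host drop that page, so the next guest access to it - including accesses
 * the CPU makes while delivering an event - has to be faulted back in.
 */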
#define flush_phys_addr(__s) outl(0xe4, __s)
#define flush_stack() do {						\
		int __l;						\
		flush_phys_addr(virt_to_phys(&__l));			\
	} while (0)

extern char isr_iret_ip[];

static void flush_idt_page(void)
{
	struct descriptor_table_ptr ptr;
	sidt(&ptr);
	flush_phys_addr(virt_to_phys((void*)ptr.base));
}
static volatile unsigned int test_divider;
static volatile int test_count;

ulong stack_phys;
void *stack_va;

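/*
 * #PF handler body: stack_va was mapped read-only, so the push performed
 * during event delivery faults; remap it writable and invalidate the TLB
 * entry so the faulting delivery can be restarted.
 */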
void do_pf_tss(void)
{
	printf("PF running\n");
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT | PTE_WRITE, 0);
	invlpg(stack_va);
}

extern void pf_tss(void);

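/*
 * #PF entry point installed with set_intr_alt_stack(14, pf_tss).  It calls
 * do_pf_tss(), discards the error code and irets.  On 32-bit the handler runs
 * as a separate task ("no task on x86_64" below), so the jmp after the iret
 * restarts it when the task is re-entered on the next fault; on x86_64 the
 * caller-saved registers are preserved by hand instead.
 */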
asm ("pf_tss: \n\t"
#ifdef __x86_64__
        // no task on x86_64, save/restore caller-save regs
        "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
        "push %r8; push %r9; push %r10; push %r11\n"
#endif
        "call do_pf_tss \n\t"
#ifdef __x86_64__
        "pop %r11; pop %r10; pop %r9; pop %r8\n"
        "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
        "add $"S", %"R "sp\n\t"	// discard error code
        "iret"W" \n\t"
        "jmp pf_tss\n\t"
    );

#ifndef __x86_64__
static void of_isr(struct ex_regs *r)
{
	printf("OF isr running\n");
	test_count++;
}
#endif

/* #NP handler: point vector 33's gate back at a present code selector so the
   faulting int $33 can be restarted */
static void np_isr(struct ex_regs *r)
{
	printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
	set_idt_sel(33, read_cs());
	test_count++;
}

/* #DE handler: give the divider a non-zero value so the restarted divl succeeds */
static void de_isr(struct ex_regs *r)
{
	printf("DE isr running divider is %d\n", test_divider);
	test_divider = 10;
}

static void bp_isr(struct ex_regs *r)
{
	printf("BP isr running\n");
	test_count++;
}

static void nested_nmi_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip != (ulong)&isr_iret_ip)
		test_count++;
}

static void nmi_isr(struct ex_regs *r)
{
	printf("NMI isr running %p\n", &isr_iret_ip);
	test_count++;
	handle_exception(2, nested_nmi_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	io_delay();
	printf("After nested NMI to self\n");
}

unsigned long *iret_stack;

static void nested_nmi_iret_isr(struct ex_regs *r)
{
	printf("Nested NMI isr running rip=%lx\n", r->rip);

	if (r->rip == iret_stack[-3])
		test_count++;
}

extern void do_iret(ulong phys_stack, void *virt_stack);

// A return to the same privilege level won't pop SS or SP, so
// save the old stack pointer in RDX while we run on the nested stack

asm("do_iret:"
#ifdef __x86_64__
	"mov %rdi, %rax \n\t"		// phys_stack
	"mov %rsi, %rdx \n\t"		// virt_stack
#else
	"mov 4(%esp), %eax \n\t"	// phys_stack
	"mov 8(%esp), %edx \n\t"	// virt_stack
#endif
	"xchg %"R "dx, %"R "sp \n\t"	// point to new stack
	"pushf"W" \n\t"
	"mov %cs, %ecx \n\t"
	"push"W" %"R "cx \n\t"
	"push"W" $1f \n\t"
	"outl %eax, $0xe4 \n\t"		// flush page
	"iret"W" \n\t"
	"1: xchg %"R "dx, %"R "sp \n\t"	// point to old stack
	"ret\n\t"
   );

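/*
 * NMI handler for the IRET test: a second NMI is sent while NMIs are still
 * blocked, then do_iret() runs an iret on a freshly allocated stack whose
 * physical page is flushed just before the iret.  The iret both re-enables
 * NMIs and faults on its stack accesses, and the nested NMI should land with
 * rip equal to the return address that do_iret pushed (iret_stack[-3]).
 */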
static void nmi_iret_isr(struct ex_regs *r)
{
	unsigned long *s = alloc_page();
	test_count++;
	printf("NMI isr running stack %p\n", s);
	handle_exception(2, nested_nmi_iret_isr);
	printf("Sending nested NMI to self\n");
	apic_self_nmi();
	printf("After nested NMI to self\n");
	iret_stack = &s[128];
	do_iret(virt_to_phys(s), iret_stack);
	printf("After iret\n");
}

static void tirq0(isr_regs_t *r)
{
	printf("irq0 running\n");
	if (test_count != 0)
		test_count++;
	eoi();
}

static void tirq1(isr_regs_t *r)
{
	printf("irq1 running\n");
	test_count++;
	eoi();
}

ulong saved_stack;

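/*
 * Temporarily run on a caller-supplied stack: switch_stack() saves the current
 * stack pointer in saved_stack and loads S; restore_stack() switches back.
 */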
#define switch_stack(S) do {						\
		asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack));	\
		asm volatile ("mov %0, %%" R "sp"::"r"(S));		\
	} while (0)

#define restore_stack() do {						\
		asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack));	\
	} while (0)

int main(void)
{
	unsigned int res;
	ulong *pt, *cr3, i;

	setup_vm();
	setup_idt();
	setup_alt_stack();

	handle_irq(32, tirq0);
	handle_irq(33, tirq1);

	/* generate HW exception that will fault on IDT and stack */
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	printf("Result is %d\n", res);
	report("DE exception", res == 150);

	/* generate soft exception (BP) that will fault on IDT and stack */
	test_count = 0;
	handle_exception(3, bp_isr);
	printf("Try int 3\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("int $3");
	printf("After int 3\n");
	report("BP exception", test_count == 1);

#ifndef __x86_64__
	/* generate soft exception (OF) that will fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\ninto"::"a"(127));
	printf("After into\n");
	report("OF exception", test_count == 1);

	/* generate soft exception (OF) using a two byte instruction that will
	   fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	asm volatile ("addb $127, %b0\naddr16 into"::"a"(127));
	printf("After into\n");
	report("2 byte OF exception", test_count == 1);
#endif

	/* generate HW interrupt that will fault on IDT */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 33 to self\n");
	irq_enable();
	apic_self_ipi(33);
	io_delay();
	irq_disable();
	printf("After vec 33 to self\n");
	report("vec 33", test_count == 1);

	/* generate soft interrupt that will fault on IDT and stack */
	test_count = 0;
	flush_idt_page();
	printf("Try int $33\n");
	flush_stack();
	asm volatile ("int $33");
	printf("After int $33\n");
	report("int $33", test_count == 1);

	/* Inject two HW interrupts, then open an interrupt window.  Both
	   interrupts will fault on IDT access */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and 33 to self\n");
	apic_self_ipi(32);
	apic_self_ipi(33);
	io_delay();
	irq_enable();
	asm volatile("nop");
	irq_disable();
	printf("After vec 32 and 33 to self\n");
	report("vec 32/33", test_count == 2);

	/* Inject a HW interrupt, do sti and then (while in the interrupt
	   shadow) inject a soft interrupt.  Fault during the soft interrupt.
	   The soft interrupt should be handled before the HW interrupt */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and int $33\n");
	apic_self_ipi(32);
	flush_stack();
	io_delay();
	irq_enable();
	asm volatile ("int $33");
	irq_disable();
	printf("After vec 32 and int $33\n");
	report("vec 32/int $33", test_count == 2);

	/* test that TPR is honored */
	test_count = 0;
	handle_irq(62, tirq1);
	flush_idt_page();
	printf("Sending vec 32 and 62 and masking one with TPR\n");
	apic_write(APIC_TASKPRI, 0xf << 4);
	irq_enable();
	apic_self_ipi(32);
	apic_self_ipi(62);
	io_delay();
	apic_write(APIC_TASKPRI, 0x2 << 4);
	printf("After 32/62 TPR test\n");
	report("TPR", test_count == 1);
	apic_write(APIC_TASKPRI, 0x0);
	while (test_count != 2); /* wait for the second irq */
	irq_disable();

	/* test a fault during NP delivery: vector 33's selector is marked
	   not-present, so the int $33 below first raises #NP */
	printf("Before NP test\n");
	test_count = 0;
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	flush_idt_page();
	flush_stack();
	asm volatile ("int $33");
	printf("After int33\n");
	report("NP exception", test_count == 2);

	/* generate NMI that will fault on IDT */
	test_count = 0;
	handle_exception(2, nmi_isr);
	flush_idt_page();
	printf("Sending NMI to self\n");
	apic_self_nmi();
	printf("After NMI to self\n");
	/* This is needed on VMX without NMI window notification.
	   An interrupt window is used instead, so let the pending NMI
	   be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	report("NMI", test_count == 2);

	/* generate NMI that will fault on IRET */
	printf("Before NMI IRET test\n");
	test_count = 0;
	handle_exception(2, nmi_iret_isr);
	printf("Sending NMI to self\n");
	apic_self_nmi();
	/* This is needed on VMX without NMI window notification.
	   An interrupt window is used instead, so let the pending NMI
	   be injected */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
	printf("After NMI to self\n");
	report("NMI", test_count == 2);
	stack_phys = (ulong)virt_to_phys(alloc_page());
	stack_va = alloc_vpage();

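	/*
	 * Map stack_va read-only onto a fresh page and flush that page, then
	 * divide by zero while running on it: delivering #DE must push onto a
	 * read-only, flushed stack, so a #PF is raised during delivery and
	 * handled by pf_tss on the alternate stack, after which the #DE
	 * delivery and the division itself complete.
	 */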
	/* Generate DE and PF exceptions serially */
	test_divider = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	/* install a read-only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT, 0);
	invlpg(stack_va);
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	flush_idt_page();
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	restore_stack();
	printf("Result is %d\n", res);
	report("DE PF exceptions", res == 150);

	/* Generate NP and PF exceptions serially */
	printf("Before NP test\n");
	test_count = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	/* install a read-only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PTE_PRESENT, 0);
	invlpg(stack_va);
	flush_idt_page();
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	asm volatile ("int $33");
	restore_stack();
	printf("After int33\n");
	report("NP PF exceptions", test_count == 2);

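	/*
	 * Build a page that is simultaneously a live page table (installed in
	 * a free top-level slot and mapping itself) and the stack for the next
	 * int $33.  With shadow paging the host write-protects guest page
	 * tables, so the pushes made while delivering the interrupt land on a
	 * page the host treats as a shadowed page table.
	 */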
	pt = alloc_page();
	cr3 = (void*)read_cr3();
	memset(pt, 0, 4096);
	/* use shadowed stack during interrupt delivery */
	for (i = 0; i < 4096/sizeof(ulong); i++) {
		if (!cr3[i]) {
			cr3[i] = virt_to_phys(pt) | PTE_PRESENT | PTE_WRITE;
			pt[0] = virt_to_phys(pt) | PTE_PRESENT | PTE_WRITE;
#ifndef __x86_64__
			((ulong*)(i<<22))[1] = 0;
#else
			((ulong*)(i<<39))[1] = 0;
#endif
			write_cr3(virt_to_phys(cr3));
			break;
		}
	}
	test_count = 0;
	printf("Try int 33 with shadowed stack\n");
	switch_stack(((char*)pt) + 4095);
	asm volatile("int $33");
	restore_stack();
	printf("After int 33 with shadowed stack\n");
	report("int 33 with shadowed stack", test_count == 1);

	return report_summary();
}