1 #include "libcflat.h"
2 #include "vm.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "isr.h"
6 #include "apic.h"
7 #include "apic-defs.h"
8 #include "vmalloc.h"
9 #include "alloc_page.h"
10 #include "delay.h"
11 #include "fwcfg.h"
12
/*
 * Register-name prefix for inline asm strings: "r" (rax/rsp/...) on
 * x86_64, "e" (eax/esp/...) on 32-bit.  Used as %"R"sp etc. below.
 */
#ifdef __x86_64__
# define R "r"
#else
# define R "e"
#endif
18
/* C body of the #PF trampoline below; needs external linkage for the asm "call". */
void do_pf_tss(void);
20
/* Send a fixed-delivery-mode IPI with vector @v from this CPU to itself. */
static void apic_self_ipi(u8 v)
{
	unsigned int icr_low = APIC_DEST_SELF | APIC_DEST_PHYSICAL |
			       APIC_DM_FIXED | APIC_INT_ASSERT | v;

	apic_icr_write(icr_low, 0);
}
26
apic_self_nmi(void)27 static void apic_self_nmi(void)
28 {
29 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
30 }
31
/*
 * Tell the test device (port 0xe4) to flush the page backing physical
 * address __s.  NOTE(review): presumably this makes the next access to
 * that page fault during event delivery — confirm against the test
 * device implementation.  No-op when the test device is absent.
 */
#define flush_phys_addr(__s) do { \
	if (test_device_enabled()) \
		outl(__s, 0xe4); \
} while (0)
36
/*
 * Flush the physical page backing the current stack, located via the
 * address of a local variable.
 */
#define flush_stack() do { \
	int __l; \
	flush_phys_addr(virt_to_phys(&__l)); \
} while (0)
41
/* Label exported by the ISR entry stubs; NOTE(review): presumably the
   address of the iret instruction in the common stub — verify in isr.S. */
extern char isr_iret_ip[];
43
flush_idt_page(void)44 static void flush_idt_page(void)
45 {
46 struct descriptor_table_ptr ptr;
47 sidt(&ptr);
48 flush_phys_addr(virt_to_phys((void*)ptr.base));
49 }
50
/* Divisor for the #DE tests; starts at 0 (faults), de_isr sets it to 10. */
static volatile unsigned int test_divider;
/* Per-test event counter, incremented by the handlers and checked by report(). */
static volatile int test_count;

/* Physical page and virtual address used as a deliberately faulting stack. */
ulong stack_phys;
void *stack_va;
56
do_pf_tss(void)57 void do_pf_tss(void)
58 {
59 printf("PF running\n");
60 install_pte(phys_to_virt(read_cr3()), 1, stack_va,
61 stack_phys | PT_PRESENT_MASK | PT_WRITABLE_MASK, 0);
62 invlpg(stack_va);
63 }
64
/* Entry point installed as the #PF handler on the alternate stack (see main). */
extern void pf_tss(void);

/*
 * #PF trampoline: call do_pf_tss() to repair the stack mapping, discard
 * the hardware-pushed error code, and iret back to retry the faulting
 * event delivery.
 */
asm ("pf_tss: \n\t"
#ifdef __x86_64__
	// no hardware task switch on x86_64, so save/restore caller-save regs
	// around the C call by hand
	"push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
	"push %r8; push %r9; push %r10; push %r11\n"
#endif
	"call do_pf_tss \n\t"
#ifdef __x86_64__
	"pop %r11; pop %r10; pop %r9; pop %r8\n"
	"pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
	"add $"S", %"R "sp\n\t" // discard error code
	"iret"W" \n\t"
	// NOTE(review): presumably only reached on 32-bit where a task-gate
	// iret returns here; loops back so the next #PF restarts the handler
	"jmp pf_tss\n\t"
	);
82
83
#ifndef __x86_64__
/* #OF handler (INTO is 32-bit only): just record that the exception fired. */
static void of_isr(struct ex_regs *r)
{
	(void)r;
	printf("OF isr running\n");
	++test_count;
}
#endif
91
np_isr(struct ex_regs * r)92 static void np_isr(struct ex_regs *r)
93 {
94 printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
95 set_idt_sel(33, read_cs());
96 test_count++;
97 }
98
de_isr(struct ex_regs * r)99 static void de_isr(struct ex_regs *r)
100 {
101 printf("DE isr running divider is %d\n", test_divider);
102 test_divider = 10;
103 }
104
bp_isr(struct ex_regs * r)105 static void bp_isr(struct ex_regs *r)
106 {
107 printf("BP isr running\n");
108 test_count++;
109 }
110
nested_nmi_isr(struct ex_regs * r)111 static void nested_nmi_isr(struct ex_regs *r)
112 {
113 printf("Nested NMI isr running rip=%lx\n", r->rip);
114
115 if (r->rip != (ulong)&isr_iret_ip)
116 test_count++;
117 }
nmi_isr(struct ex_regs * r)118 static void nmi_isr(struct ex_regs *r)
119 {
120 printf("NMI isr running %p\n", &isr_iret_ip);
121 test_count++;
122 handle_exception(2, nested_nmi_isr);
123 printf("Sending nested NMI to self\n");
124 apic_self_nmi();
125 io_delay();
126 printf("After nested NMI to self\n");
127 }
128
/* Top of the nested stack passed to do_iret(); read by nested_nmi_iret_isr. */
unsigned long *iret_stack;
130
nested_nmi_iret_isr(struct ex_regs * r)131 static void nested_nmi_iret_isr(struct ex_regs *r)
132 {
133 printf("Nested NMI isr running rip=%lx\n", r->rip);
134
135 if (r->rip == iret_stack[-3])
136 test_count++;
137 }
138
/*
 * Run an iret on a fresh stack: switch to virt_stack, build a same-CPL
 * EFLAGS/CS/IP frame, optionally flush phys_stack through the test
 * device so the iret itself faults, execute iret, then switch back.
 */
extern void do_iret(ulong phys_stack, void *virt_stack);

// Return to same privilege level won't pop SS or SP, so
// save it in RDX while we run on the nested stack

/* NOTE(review): presumably set by the harness when no test device is
   present (then the flush is skipped) — verify where it is defined. */
extern bool no_test_device;

asm("do_iret:"
#ifdef __x86_64__
	"mov %rdi, %rax \n\t"		// phys_stack
	"mov %rsi, %rdx \n\t"		// virt_stack
#else
	"mov 4(%esp), %eax \n\t"	// phys_stack
	"mov 8(%esp), %edx \n\t"	// virt_stack
#endif
	"xchg %"R "dx, %"R "sp \n\t"	// point to new stack
	"pushf"W" \n\t"			// build iret frame: FLAGS, CS, IP
	"mov %cs, %ecx \n\t"
	"push"W" %"R "cx \n\t"
#ifndef __x86_64__
	"push"W" $2f \n\t"

	"cmpb $0, no_test_device\n\t"	// see if need to flush
#else
	"leaq 2f(%rip), %rbx \n\t"
	"pushq %rbx \n\t"

	"mov no_test_device(%rip), %bl \n\t"
	"cmpb $0, %bl\n\t"		// see if need to flush
#endif
	"jnz 1f\n\t"
	"outl %eax, $0xe4 \n\t"		// flush page so iret faults on the stack
	"1: \n\t"
	"iret"W" \n\t"
	"2: xchg %"R "dx, %"R "sp \n\t"	// point to old stack
	"ret\n\t"
	);
176
nmi_iret_isr(struct ex_regs * r)177 static void nmi_iret_isr(struct ex_regs *r)
178 {
179 unsigned long *s = alloc_page();
180 test_count++;
181 printf("NMI isr running stack %p\n", s);
182 handle_exception(2, nested_nmi_iret_isr);
183 printf("Sending nested NMI to self\n");
184 apic_self_nmi();
185 printf("After nested NMI to self\n");
186 iret_stack = &s[128];
187 do_iret(virt_to_phys(s), iret_stack);
188 printf("After iret\n");
189 }
190
/* Vector 32 handler: count only if vector 33/62's handler ran first. */
static void tirq0(isr_regs_t *r)
{
	(void)r;
	printf("irq0 running\n");
	if (test_count == 1)
		++test_count;
	eoi();
}
198
/* Vector 33/62 handler: unconditionally count the delivery. */
static void tirq1(isr_regs_t *r)
{
	(void)r;
	printf("irq1 running\n");
	++test_count;
	eoi();
}
205
/* Stack pointer saved by switch_stack() and restored by restore_stack(). */
ulong saved_stack;

/* Switch the stack pointer to S, remembering the old one in saved_stack. */
#define switch_stack(S) do { \
	asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack)); \
	asm volatile ("mov %0, %%" R "sp"::"r"(S)); \
} while(0)

/* Undo switch_stack(): restore the stack pointer saved in saved_stack. */
#define restore_stack() do { \
	asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack)); \
} while(0)
216
/*
 * Exercise event-injection corner cases: hardware exceptions, external
 * and software interrupts, and NMIs whose delivery faults on the IDT
 * and/or the stack, including nested NMIs and serial (#DE->#PF,
 * #NP->#PF) faults during delivery.
 */
int main(void)
{
	unsigned int res;
	ulong *pt, *cr3, i;

	setup_vm();
	setup_alt_stack();

	handle_irq(32, tirq0);
	handle_irq(33, tirq1);

	/* generate HW exception that will fault on IDT and stack */
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	flush_idt_page();
	flush_stack();
	/* edx:eax = 0:1500 divided by test_divider (0 -> #DE); de_isr sets
	   the divider to 10, so the retried divide yields 150 */
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	printf("Result is %d\n", res);
	report(res == 150, "DE exception");

	/* generate soft exception (BP) that will fault on IDT and stack */
	test_count = 0;
	handle_exception(3, bp_isr);
	printf("Try int 3\n");
	flush_idt_page();
	flush_stack();
	asm volatile ("int $3");
	printf("After int 3\n");
	report(test_count == 1, "BP exception");

#ifndef __x86_64__
	/* generate soft exception (OF) that will fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	/* 127 + 127 overflows AL, so INTO raises #OF */
	asm volatile ("addb $127, %b0\ninto"::"a"(127));
	printf("After into\n");
	report(test_count == 1, "OF exception");

	/* generate soft exception (OF) using two bit instruction that will
	   fault on IDT */
	test_count = 0;
	handle_exception(4, of_isr);
	flush_idt_page();
	printf("Try into\n");
	/* the addr16 prefix makes INTO a two-byte instruction */
	asm volatile ("addb $127, %b0\naddr16 into"::"a"(127));
	printf("After into\n");
	report(test_count == 1, "2 byte OF exception");
#endif

	/* generate HW interrupt that will fault on IDT */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 33 to self\n");
	sti();
	apic_self_ipi(33);
	io_delay();
	cli();
	printf("After vec 33 to self\n");
	report(test_count == 1, "vec 33");

	/* generate soft interrupt that will fault on IDT and stack */
	test_count = 0;
	flush_idt_page();
	printf("Try int $33\n");
	flush_stack();
	asm volatile ("int $33");
	printf("After int $33\n");
	report(test_count == 1, "int $33");

	/* Inject two HW interrupt than open interrupt windows. Both interrupt
	   will fault on IDT access */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and 33 to self\n");
	apic_self_ipi(32);
	apic_self_ipi(33);
	io_delay();
	sti_nop_cli();
	printf("After vec 32 and 33 to self\n");
	report(test_count == 2, "vec 32/33");


	/* Inject HW interrupt, do sti and than (while in irq shadow) inject
	   soft interrupt. Fault during soft interrupt. Soft interrupt should
	   be handled before HW interrupt */
	test_count = 0;
	flush_idt_page();
	printf("Sending vec 32 and int $33\n");
	apic_self_ipi(32);
	flush_stack();
	io_delay();
	/* int $33 executes inside the STI interrupt shadow, so it must be
	   delivered before the pending vector 32 */
	asm volatile ("sti; int $33");
	cli();
	printf("After vec 32 and int $33\n");
	report(test_count == 2, "vec 32/int $33");

	/* test that TPR is honored */
	test_count = 0;
	handle_irq(62, tirq1);
	flush_idt_page();
	/* NOTE(review): the message says vec 33 but the code sends vec 32;
	   the test logic (one vector masked by TPR, one delivered) holds
	   either way */
	printf("Sending vec 33 and 62 and mask one with TPR\n");
	apic_write(APIC_TASKPRI, 0xf << 4);	/* TPR 0xf0: mask both vectors */
	sti();
	apic_self_ipi(32);
	apic_self_ipi(62);
	io_delay();
	apic_write(APIC_TASKPRI, 0x2 << 4);	/* TPR 0x20: lets only vec 62 in */
	printf("After 33/62 TPR test\n");
	report(test_count == 1, "TPR");
	apic_write(APIC_TASKPRI, 0x0);	/* unmask vec 32 as well */
	while(test_count != 2); /* wait for second irq */
	cli();

	/* test fault durint NP delivery */
	printf("Before NP test\n");
	test_count = 0;
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);	/* not-present selector -> #NP on int $33 */
	flush_idt_page();
	flush_stack();
	/* np_isr repairs the selector, then the retried int $33 runs tirq1 */
	asm volatile ("int $33");
	printf("After int33\n");
	report(test_count == 2, "NP exception");

	/* generate NMI that will fault on IDT */
	test_count = 0;
	handle_exception(2, nmi_isr);
	flush_idt_page();
	printf("Sending NMI to self\n");
	apic_self_nmi();
	printf("After NMI to self\n");
	/* this is needed on VMX without NMI window notification.
	   Interrupt windows is used instead, so let pending NMI
	   to be injected */
	sti_nop_cli();
	report(test_count == 2, "NMI");

	/* generate NMI that will fault on IRET */
	printf("Before NMI IRET test\n");
	test_count = 0;
	handle_exception(2, nmi_iret_isr);
	printf("Sending NMI to self\n");
	apic_self_nmi();
	/* this is needed on VMX without NMI window notification.
	   Interrupt windows is used instead, so let pending NMI
	   to be injected */
	sti_nop_cli();
	printf("After NMI to self\n");
	report(test_count == 2, "NMI");

	stack_phys = (ulong)virt_to_phys(alloc_page());
	stack_va = alloc_vpage();

	/* Generate DE and PF exceptions serially */
	test_divider = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(0, de_isr);
	printf("Try to divide by 0\n");
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	flush_idt_page();
	/* #DE delivery pushes onto the read-only stack -> #PF; pf_tss makes
	   the stack writable and the #DE delivery is retried */
	asm volatile ("divl %3": "=a"(res)
		      : "d"(0), "a"(1500), "m"(test_divider));
	restore_stack();
	printf("Result is %d\n", res);
	report(res == 150, "DE PF exceptions");

	/* Generate NP and PF exceptions serially */
	printf("Before NP test\n");
	test_count = 0;
	set_intr_alt_stack(14, pf_tss);
	handle_exception(11, np_isr);
	set_idt_sel(33, NP_SEL);
	/* install read only pte */
	install_pte(phys_to_virt(read_cr3()), 1, stack_va,
		    stack_phys | PT_PRESENT_MASK, 0);
	invlpg(stack_va);
	flush_idt_page();
	flush_phys_addr(stack_phys);
	switch_stack(stack_va + 4095);
	asm volatile ("int $33");
	restore_stack();
	printf("After int33\n");
	report(test_count == 2, "NP PF exceptions");

	pt = alloc_page();
	cr3 = (void*)read_cr3();
	/* use shadowed stack during interrupt delivery */
	for (i = 0; i < 4096/sizeof(ulong); i++) {
		if (!cr3[i]) {
			/* NOTE(review): pt doubles as a top-level page table
			   and the page it maps; clearing the second entry of
			   the new region leaves a hole just above the stack
			   page — confirm intent against the shadow-paging
			   behavior under test */
			cr3[i] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
			pt[0] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
#ifndef __x86_64__
			((ulong*)(i<<22))[1] = 0;
#else
			((ulong*)(i<<39))[1] = 0;
#endif
			write_cr3(virt_to_phys(cr3));
			break;
		}
	}
	test_count = 0;
	printf("Try int 33 with shadowed stack\n");
	switch_stack(((char*)pt) + 4095);
	asm volatile("int $33");
	restore_stack();
	printf("After int 33 with shadowed stack\n");
	report(test_count == 1, "int 33 with shadowed stack");

	return report_summary();
}
434