/*
 * Event injection tests: deliver exceptions, software/hardware interrupts
 * and NMIs while the IDT page, the stack page or the IRET frame has been
 * flushed or made read-only, so that event delivery itself hits a fault.
 */
#include "libcflat.h"
#include "vm.h"
#include "processor.h"
#include "desc.h"
#include "isr.h"
#include "apic.h"
#include "apic-defs.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "delay.h"
#include "fwcfg.h"

#ifdef __x86_64__
# define R "r"
#else
# define R "e"
#endif

void do_pf_tss(void);

static void apic_self_ipi(u8 v)
{
        apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
                       APIC_INT_ASSERT | v, 0);
}

static void apic_self_nmi(void)
{
        apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
}

#define flush_phys_addr(__s) do {                               \
                if (test_device_enabled())                      \
                        outl(__s, 0xe4);                        \
        } while (0)

#define flush_stack() do {                                      \
                int __l;                                        \
                flush_phys_addr(virt_to_phys(&__l));            \
        } while (0)

extern char isr_iret_ip[];

static void flush_idt_page(void)
{
        struct descriptor_table_ptr ptr;

        sidt(&ptr);
        flush_phys_addr(virt_to_phys((void*)ptr.base));
}

static volatile unsigned int test_divider;
static volatile int test_count;

ulong stack_phys;
void *stack_va;

void do_pf_tss(void)
{
        printf("PF running\n");
        install_pte(phys_to_virt(read_cr3()), 1, stack_va,
                    stack_phys | PT_PRESENT_MASK | PT_WRITABLE_MASK, 0);
        invlpg(stack_va);
}

extern void pf_tss(void);

asm ("pf_tss: \n\t"
#ifdef __x86_64__
        // no task on x86_64, save/restore caller-save regs
        "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
        "push %r8; push %r9; push %r10; push %r11\n"
#endif
        "call do_pf_tss \n\t"
#ifdef __x86_64__
        "pop %r11; pop %r10; pop %r9; pop %r8\n"
        "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
        "add $"S", %"R "sp\n\t" // discard error code
        "iret"W" \n\t"
        "jmp pf_tss\n\t"
    );

#ifndef __x86_64__
static void of_isr(struct ex_regs *r)
{
        printf("OF isr running\n");
        test_count++;
}
#endif

static void np_isr(struct ex_regs *r)
{
        printf("NP isr running %lx err=%lx\n", r->rip, r->error_code);
        set_idt_sel(33, read_cs());
        test_count++;
}

static void de_isr(struct ex_regs *r)
{
        printf("DE isr running divider is %d\n", test_divider);
        test_divider = 10;
}

static void bp_isr(struct ex_regs *r)
{
        printf("BP isr running\n");
        test_count++;
}

static void nested_nmi_isr(struct ex_regs *r)
{
        printf("Nested NMI isr running rip=%lx\n", r->rip);

        if (r->rip != (ulong)&isr_iret_ip)
                test_count++;
}

static void nmi_isr(struct ex_regs *r)
{
        printf("NMI isr running %p\n", &isr_iret_ip);
        test_count++;
        handle_exception(2, nested_nmi_isr);
        printf("Sending nested NMI to self\n");
        apic_self_nmi();
        io_delay();
        printf("After nested NMI to self\n");
}

unsigned long *iret_stack;

static void nested_nmi_iret_isr(struct ex_regs *r)
{
        printf("Nested NMI isr running rip=%lx\n", r->rip);

        if (r->rip == iret_stack[-3])
                test_count++;
}

extern void do_iret(ulong phys_stack, void *virt_stack);

// A return to the same privilege level won't pop SS or SP, so
// save the old SP in RDX while we run on the nested stack

extern bool no_test_device;

asm("do_iret:"
#ifdef __x86_64__
        "mov %rdi, %rax \n\t"           // phys_stack
        "mov %rsi, %rdx \n\t"           // virt_stack
#else
        "mov 4(%esp), %eax \n\t"        // phys_stack
        "mov 8(%esp), %edx \n\t"        // virt_stack
#endif
"dx, %"R "sp \n\t" // point to new stack 155 "pushf"W" \n\t" 156 "mov %cs, %ecx \n\t" 157 "push"W" %"R "cx \n\t" 158 "push"W" $2f \n\t" 159 160 "cmpb $0, no_test_device\n\t" // see if need to flush 161 "jnz 1f\n\t" 162 "outl %eax, $0xe4 \n\t" // flush page 163 "1: \n\t" 164 "iret"W" \n\t" 165 "2: xchg %"R "dx, %"R "sp \n\t" // point to old stack 166 "ret\n\t" 167 ); 168 169 static void nmi_iret_isr(struct ex_regs *r) 170 { 171 unsigned long *s = alloc_page(); 172 test_count++; 173 printf("NMI isr running stack %p\n", s); 174 handle_exception(2, nested_nmi_iret_isr); 175 printf("Sending nested NMI to self\n"); 176 apic_self_nmi(); 177 printf("After nested NMI to self\n"); 178 iret_stack = &s[128]; 179 do_iret(virt_to_phys(s), iret_stack); 180 printf("After iret\n"); 181 } 182 183 static void tirq0(isr_regs_t *r) 184 { 185 printf("irq0 running\n"); 186 if (test_count == 1) 187 test_count++; 188 eoi(); 189 } 190 191 static void tirq1(isr_regs_t *r) 192 { 193 printf("irq1 running\n"); 194 test_count++; 195 eoi(); 196 } 197 198 ulong saved_stack; 199 200 #define switch_stack(S) do { \ 201 asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack)); \ 202 asm volatile ("mov %0, %%" R "sp"::"r"(S)); \ 203 } while(0) 204 205 #define restore_stack() do { \ 206 asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack)); \ 207 } while(0) 208 209 int main(void) 210 { 211 unsigned int res; 212 ulong *pt, *cr3, i; 213 214 setup_vm(); 215 setup_alt_stack(); 216 217 handle_irq(32, tirq0); 218 handle_irq(33, tirq1); 219 220 /* generate HW exception that will fault on IDT and stack */ 221 handle_exception(0, de_isr); 222 printf("Try to divide by 0\n"); 223 flush_idt_page(); 224 flush_stack(); 225 asm volatile ("divl %3": "=a"(res) 226 : "d"(0), "a"(1500), "m"(test_divider)); 227 printf("Result is %d\n", res); 228 report(res == 150, "DE exception"); 229 230 /* generate soft exception (BP) that will fault on IDT and stack */ 231 test_count = 0; 232 handle_exception(3, bp_isr); 233 printf("Try int 3\n"); 234 flush_idt_page(); 235 flush_stack(); 236 asm volatile ("int $3"); 237 printf("After int 3\n"); 238 report(test_count == 1, "BP exception"); 239 240 #ifndef __x86_64__ 241 /* generate soft exception (OF) that will fault on IDT */ 242 test_count = 0; 243 handle_exception(4, of_isr); 244 flush_idt_page(); 245 printf("Try into\n"); 246 asm volatile ("addb $127, %b0\ninto"::"a"(127)); 247 printf("After into\n"); 248 report(test_count == 1, "OF exception"); 249 250 /* generate soft exception (OF) using two bit instruction that will 251 fault on IDT */ 252 test_count = 0; 253 handle_exception(4, of_isr); 254 flush_idt_page(); 255 printf("Try into\n"); 256 asm volatile ("addb $127, %b0\naddr16 into"::"a"(127)); 257 printf("After into\n"); 258 report(test_count == 1, "2 byte OF exception"); 259 #endif 260 261 /* generate HW interrupt that will fault on IDT */ 262 test_count = 0; 263 flush_idt_page(); 264 printf("Sending vec 33 to self\n"); 265 irq_enable(); 266 apic_self_ipi(33); 267 io_delay(); 268 irq_disable(); 269 printf("After vec 33 to self\n"); 270 report(test_count == 1, "vec 33"); 271 272 /* generate soft interrupt that will fault on IDT and stack */ 273 test_count = 0; 274 flush_idt_page(); 275 printf("Try int $33\n"); 276 flush_stack(); 277 asm volatile ("int $33"); 278 printf("After int $33\n"); 279 report(test_count == 1, "int $33"); 280 281 /* Inject two HW interrupt than open iterrupt windows. 
        /* Inject two HW interrupts, then open an interrupt window.
           Both interrupts will fault on IDT access */
        test_count = 0;
        flush_idt_page();
        printf("Sending vec 32 and 33 to self\n");
        apic_self_ipi(32);
        apic_self_ipi(33);
        io_delay();
        irq_enable();
        asm volatile("nop");
        irq_disable();
        printf("After vec 32 and 33 to self\n");
        report(test_count == 2, "vec 32/33");

        /* Inject a HW interrupt, do sti and then (while in the interrupt
           shadow) inject a soft interrupt. Fault during the soft interrupt.
           The soft interrupt should be handled before the HW interrupt */
        test_count = 0;
        flush_idt_page();
        printf("Sending vec 32 and int $33\n");
        apic_self_ipi(32);
        flush_stack();
        io_delay();
        asm volatile ("sti; int $33");
        irq_disable();
        printf("After vec 32 and int $33\n");
        report(test_count == 2, "vec 32/int $33");

        /* test that TPR is honored */
        test_count = 0;
        handle_irq(62, tirq1);
        flush_idt_page();
        printf("Sending vec 33 and 62 and mask one with TPR\n");
        apic_write(APIC_TASKPRI, 0xf << 4);
        irq_enable();
        apic_self_ipi(32);
        apic_self_ipi(62);
        io_delay();
        apic_write(APIC_TASKPRI, 0x2 << 4);
        printf("After 33/62 TPR test\n");
        report(test_count == 1, "TPR");
        apic_write(APIC_TASKPRI, 0x0);
        while (test_count != 2); /* wait for second irq */
        irq_disable();

        /* test fault during NP delivery */
        printf("Before NP test\n");
        test_count = 0;
        handle_exception(11, np_isr);
        set_idt_sel(33, NP_SEL);
        flush_idt_page();
        flush_stack();
        asm volatile ("int $33");
        printf("After int33\n");
        report(test_count == 2, "NP exception");

        /* generate an NMI that will fault on IDT */
        test_count = 0;
        handle_exception(2, nmi_isr);
        flush_idt_page();
        printf("Sending NMI to self\n");
        apic_self_nmi();
        printf("After NMI to self\n");
        /* this is needed on VMX without NMI window notification.
           An interrupt window is used instead, so let the pending NMI
           be injected */
        irq_enable();
        asm volatile ("nop");
        irq_disable();
        report(test_count == 2, "NMI");

        /* generate an NMI that will fault on IRET */
        printf("Before NMI IRET test\n");
        test_count = 0;
        handle_exception(2, nmi_iret_isr);
        printf("Sending NMI to self\n");
        apic_self_nmi();
        /* this is needed on VMX without NMI window notification.
           An interrupt window is used instead, so let the pending NMI
           be injected */
        irq_enable();
        asm volatile ("nop");
        irq_disable();
        printf("After NMI to self\n");
        report(test_count == 2, "NMI");

        stack_phys = (ulong)virt_to_phys(alloc_page());
        stack_va = alloc_vpage();

        /* Generate DE and PF exceptions serially */
        test_divider = 0;
        set_intr_alt_stack(14, pf_tss);
        handle_exception(0, de_isr);
        printf("Try to divide by 0\n");
        /* install a read-only pte */
        install_pte(phys_to_virt(read_cr3()), 1, stack_va,
                    stack_phys | PT_PRESENT_MASK, 0);
        invlpg(stack_va);
        flush_phys_addr(stack_phys);
        switch_stack(stack_va + 4095);
        flush_idt_page();
        asm volatile ("divl %3": "=a"(res)
                      : "d"(0), "a"(1500), "m"(test_divider));
        restore_stack();
        printf("Result is %d\n", res);
        report(res == 150, "DE PF exceptions");

        /* Generate NP and PF exceptions serially */
        printf("Before NP test\n");
        test_count = 0;
        set_intr_alt_stack(14, pf_tss);
        handle_exception(11, np_isr);
        set_idt_sel(33, NP_SEL);
        /* install a read-only pte */
        install_pte(phys_to_virt(read_cr3()), 1, stack_va,
                    stack_phys | PT_PRESENT_MASK, 0);
        invlpg(stack_va);
        flush_idt_page();
        flush_phys_addr(stack_phys);
        switch_stack(stack_va + 4095);
        asm volatile ("int $33");
        restore_stack();
        printf("After int33\n");
        report(test_count == 2, "NP PF exceptions");

        pt = alloc_page();
        cr3 = (void*)read_cr3();
        /* use a shadowed stack during interrupt delivery */
        for (i = 0; i < 4096 / sizeof(ulong); i++) {
                if (!cr3[i]) {
                        cr3[i] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
                        pt[0] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK;
#ifndef __x86_64__
                        ((ulong*)(i << 22))[1] = 0;
#else
                        ((ulong*)(i << 39))[1] = 0;
#endif
                        write_cr3(virt_to_phys(cr3));
                        break;
                }
        }
        test_count = 0;
        printf("Try int 33 with shadowed stack\n");
        switch_stack(((char*)pt) + 4095);
        asm volatile("int $33");
        restore_stack();
        printf("After int 33 with shadowed stack\n");
        report(test_count == 1, "int 33 with shadowed stack");

        return report_summary();
}