1 #include "libcflat.h" 2 #include "vm.h" 3 #include "processor.h" 4 #include "desc.h" 5 #include "isr.h" 6 #include "apic.h" 7 #include "apic-defs.h" 8 #include "vmalloc.h" 9 #include "alloc_page.h" 10 #include "delay.h" 11 #include "fwcfg.h" 12 13 #ifdef __x86_64__ 14 # define R "r" 15 #else 16 # define R "e" 17 #endif 18 19 void do_pf_tss(void); 20 21 static void apic_self_ipi(u8 v) 22 { 23 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 24 APIC_INT_ASSERT | v, 0); 25 } 26 27 static void apic_self_nmi(void) 28 { 29 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 30 } 31 32 #define flush_phys_addr(__s) do { \ 33 if (test_device_enabled()) \ 34 outl(__s, 0xe4); \ 35 } while (0) 36 37 #define flush_stack() do { \ 38 int __l; \ 39 flush_phys_addr(virt_to_phys(&__l)); \ 40 } while (0) 41 42 extern char isr_iret_ip[]; 43 44 static void flush_idt_page(void) 45 { 46 struct descriptor_table_ptr ptr; 47 sidt(&ptr); 48 flush_phys_addr(virt_to_phys((void*)ptr.base)); 49 } 50 51 static volatile unsigned int test_divider; 52 static volatile int test_count; 53 54 ulong stack_phys; 55 void *stack_va; 56 57 void do_pf_tss(void) 58 { 59 printf("PF running\n"); 60 install_pte(phys_to_virt(read_cr3()), 1, stack_va, 61 stack_phys | PT_PRESENT_MASK | PT_WRITABLE_MASK, 0); 62 invlpg(stack_va); 63 } 64 65 extern void pf_tss(void); 66 67 asm ("pf_tss: \n\t" 68 #ifdef __x86_64__ 69 // no task on x86_64, save/restore caller-save regs 70 "push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n" 71 "push %r8; push %r9; push %r10; push %r11\n" 72 #endif 73 "call do_pf_tss \n\t" 74 #ifdef __x86_64__ 75 "pop %r11; pop %r10; pop %r9; pop %r8\n" 76 "pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n" 77 #endif 78 "add $"S", %"R "sp\n\t" // discard error code 79 "iret"W" \n\t" 80 "jmp pf_tss\n\t" 81 ); 82 83 84 #ifndef __x86_64__ 85 static void of_isr(struct ex_regs *r) 86 { 87 printf("OF isr running\n"); 88 test_count++; 89 } 90 #endif 91 92 static void np_isr(struct ex_regs *r) 93 { 94 printf("NP isr running %lx err=%lx\n", r->rip, r->error_code); 95 set_idt_sel(33, read_cs()); 96 test_count++; 97 } 98 99 static void de_isr(struct ex_regs *r) 100 { 101 printf("DE isr running divider is %d\n", test_divider); 102 test_divider = 10; 103 } 104 105 static void bp_isr(struct ex_regs *r) 106 { 107 printf("BP isr running\n"); 108 test_count++; 109 } 110 111 static void nested_nmi_isr(struct ex_regs *r) 112 { 113 printf("Nested NMI isr running rip=%lx\n", r->rip); 114 115 if (r->rip != (ulong)&isr_iret_ip) 116 test_count++; 117 } 118 static void nmi_isr(struct ex_regs *r) 119 { 120 printf("NMI isr running %p\n", &isr_iret_ip); 121 test_count++; 122 handle_exception(2, nested_nmi_isr); 123 printf("Sending nested NMI to self\n"); 124 apic_self_nmi(); 125 io_delay(); 126 printf("After nested NMI to self\n"); 127 } 128 129 unsigned long *iret_stack; 130 131 static void nested_nmi_iret_isr(struct ex_regs *r) 132 { 133 printf("Nested NMI isr running rip=%lx\n", r->rip); 134 135 if (r->rip == iret_stack[-3]) 136 test_count++; 137 } 138 139 extern void do_iret(ulong phys_stack, void *virt_stack); 140 141 // Return to same privilege level won't pop SS or SP, so 142 // save it in RDX while we run on the nested stack 143 144 extern bool no_test_device; 145 146 asm("do_iret:" 147 #ifdef __x86_64__ 148 "mov %rdi, %rax \n\t" // phys_stack 149 "mov %rsi, %rdx \n\t" // virt_stack 150 #else 151 "mov 4(%esp), %eax \n\t" // phys_stack 152 "mov 8(%esp), %edx \n\t" // virt_stack 153 #endif 154 "xchg %"R "dx, %"R "sp \n\t" // point to new stack 155 "pushf"W" \n\t" 156 "mov %cs, %ecx \n\t" 157 "push"W" %"R "cx \n\t" 158 #ifndef __x86_64__ 159 "push"W" $2f \n\t" 160 161 "cmpb $0, no_test_device\n\t" // see if need to flush 162 #else 163 "leaq 2f(%rip), %rbx \n\t" 164 "pushq %rbx \n\t" 165 166 "mov no_test_device(%rip), %bl \n\t" 167 "cmpb $0, %bl\n\t" // see if need to flush 168 #endif 169 "jnz 1f\n\t" 170 "outl %eax, $0xe4 \n\t" // flush page 171 "1: \n\t" 172 "iret"W" \n\t" 173 "2: xchg %"R "dx, %"R "sp \n\t" // point to old stack 174 "ret\n\t" 175 ); 176 177 static void nmi_iret_isr(struct ex_regs *r) 178 { 179 unsigned long *s = alloc_page(); 180 test_count++; 181 printf("NMI isr running stack %p\n", s); 182 handle_exception(2, nested_nmi_iret_isr); 183 printf("Sending nested NMI to self\n"); 184 apic_self_nmi(); 185 printf("After nested NMI to self\n"); 186 iret_stack = &s[128]; 187 do_iret(virt_to_phys(s), iret_stack); 188 printf("After iret\n"); 189 } 190 191 static void tirq0(isr_regs_t *r) 192 { 193 printf("irq0 running\n"); 194 if (test_count == 1) 195 test_count++; 196 eoi(); 197 } 198 199 static void tirq1(isr_regs_t *r) 200 { 201 printf("irq1 running\n"); 202 test_count++; 203 eoi(); 204 } 205 206 ulong saved_stack; 207 208 #define switch_stack(S) do { \ 209 asm volatile ("mov %%" R "sp, %0":"=r"(saved_stack)); \ 210 asm volatile ("mov %0, %%" R "sp"::"r"(S)); \ 211 } while(0) 212 213 #define restore_stack() do { \ 214 asm volatile ("mov %0, %%" R "sp"::"r"(saved_stack)); \ 215 } while(0) 216 217 int main(void) 218 { 219 unsigned int res; 220 ulong *pt, *cr3, i; 221 222 setup_vm(); 223 setup_alt_stack(); 224 225 handle_irq(32, tirq0); 226 handle_irq(33, tirq1); 227 228 /* generate HW exception that will fault on IDT and stack */ 229 handle_exception(0, de_isr); 230 printf("Try to divide by 0\n"); 231 flush_idt_page(); 232 flush_stack(); 233 asm volatile ("divl %3": "=a"(res) 234 : "d"(0), "a"(1500), "m"(test_divider)); 235 printf("Result is %d\n", res); 236 report(res == 150, "DE exception"); 237 238 /* generate soft exception (BP) that will fault on IDT and stack */ 239 test_count = 0; 240 handle_exception(3, bp_isr); 241 printf("Try int 3\n"); 242 flush_idt_page(); 243 flush_stack(); 244 asm volatile ("int $3"); 245 printf("After int 3\n"); 246 report(test_count == 1, "BP exception"); 247 248 #ifndef __x86_64__ 249 /* generate soft exception (OF) that will fault on IDT */ 250 test_count = 0; 251 handle_exception(4, of_isr); 252 flush_idt_page(); 253 printf("Try into\n"); 254 asm volatile ("addb $127, %b0\ninto"::"a"(127)); 255 printf("After into\n"); 256 report(test_count == 1, "OF exception"); 257 258 /* generate soft exception (OF) using two bit instruction that will 259 fault on IDT */ 260 test_count = 0; 261 handle_exception(4, of_isr); 262 flush_idt_page(); 263 printf("Try into\n"); 264 asm volatile ("addb $127, %b0\naddr16 into"::"a"(127)); 265 printf("After into\n"); 266 report(test_count == 1, "2 byte OF exception"); 267 #endif 268 269 /* generate HW interrupt that will fault on IDT */ 270 test_count = 0; 271 flush_idt_page(); 272 printf("Sending vec 33 to self\n"); 273 irq_enable(); 274 apic_self_ipi(33); 275 io_delay(); 276 irq_disable(); 277 printf("After vec 33 to self\n"); 278 report(test_count == 1, "vec 33"); 279 280 /* generate soft interrupt that will fault on IDT and stack */ 281 test_count = 0; 282 flush_idt_page(); 283 printf("Try int $33\n"); 284 flush_stack(); 285 asm volatile ("int $33"); 286 printf("After int $33\n"); 287 report(test_count == 1, "int $33"); 288 289 /* Inject two HW interrupt than open interrupt windows. Both interrupt 290 will fault on IDT access */ 291 test_count = 0; 292 flush_idt_page(); 293 printf("Sending vec 32 and 33 to self\n"); 294 apic_self_ipi(32); 295 apic_self_ipi(33); 296 io_delay(); 297 irq_enable(); 298 asm volatile("nop"); 299 irq_disable(); 300 printf("After vec 32 and 33 to self\n"); 301 report(test_count == 2, "vec 32/33"); 302 303 304 /* Inject HW interrupt, do sti and than (while in irq shadow) inject 305 soft interrupt. Fault during soft interrupt. Soft interrupt should 306 be handled before HW interrupt */ 307 test_count = 0; 308 flush_idt_page(); 309 printf("Sending vec 32 and int $33\n"); 310 apic_self_ipi(32); 311 flush_stack(); 312 io_delay(); 313 asm volatile ("sti; int $33"); 314 irq_disable(); 315 printf("After vec 32 and int $33\n"); 316 report(test_count == 2, "vec 32/int $33"); 317 318 /* test that TPR is honored */ 319 test_count = 0; 320 handle_irq(62, tirq1); 321 flush_idt_page(); 322 printf("Sending vec 33 and 62 and mask one with TPR\n"); 323 apic_write(APIC_TASKPRI, 0xf << 4); 324 irq_enable(); 325 apic_self_ipi(32); 326 apic_self_ipi(62); 327 io_delay(); 328 apic_write(APIC_TASKPRI, 0x2 << 4); 329 printf("After 33/62 TPR test\n"); 330 report(test_count == 1, "TPR"); 331 apic_write(APIC_TASKPRI, 0x0); 332 while(test_count != 2); /* wait for second irq */ 333 irq_disable(); 334 335 /* test fault durint NP delivery */ 336 printf("Before NP test\n"); 337 test_count = 0; 338 handle_exception(11, np_isr); 339 set_idt_sel(33, NP_SEL); 340 flush_idt_page(); 341 flush_stack(); 342 asm volatile ("int $33"); 343 printf("After int33\n"); 344 report(test_count == 2, "NP exception"); 345 346 /* generate NMI that will fault on IDT */ 347 test_count = 0; 348 handle_exception(2, nmi_isr); 349 flush_idt_page(); 350 printf("Sending NMI to self\n"); 351 apic_self_nmi(); 352 printf("After NMI to self\n"); 353 /* this is needed on VMX without NMI window notification. 354 Interrupt windows is used instead, so let pending NMI 355 to be injected */ 356 irq_enable(); 357 asm volatile ("nop"); 358 irq_disable(); 359 report(test_count == 2, "NMI"); 360 361 /* generate NMI that will fault on IRET */ 362 printf("Before NMI IRET test\n"); 363 test_count = 0; 364 handle_exception(2, nmi_iret_isr); 365 printf("Sending NMI to self\n"); 366 apic_self_nmi(); 367 /* this is needed on VMX without NMI window notification. 368 Interrupt windows is used instead, so let pending NMI 369 to be injected */ 370 irq_enable(); 371 asm volatile ("nop"); 372 irq_disable(); 373 printf("After NMI to self\n"); 374 report(test_count == 2, "NMI"); 375 stack_phys = (ulong)virt_to_phys(alloc_page()); 376 stack_va = alloc_vpage(); 377 378 /* Generate DE and PF exceptions serially */ 379 test_divider = 0; 380 set_intr_alt_stack(14, pf_tss); 381 handle_exception(0, de_isr); 382 printf("Try to divide by 0\n"); 383 /* install read only pte */ 384 install_pte(phys_to_virt(read_cr3()), 1, stack_va, 385 stack_phys | PT_PRESENT_MASK, 0); 386 invlpg(stack_va); 387 flush_phys_addr(stack_phys); 388 switch_stack(stack_va + 4095); 389 flush_idt_page(); 390 asm volatile ("divl %3": "=a"(res) 391 : "d"(0), "a"(1500), "m"(test_divider)); 392 restore_stack(); 393 printf("Result is %d\n", res); 394 report(res == 150, "DE PF exceptions"); 395 396 /* Generate NP and PF exceptions serially */ 397 printf("Before NP test\n"); 398 test_count = 0; 399 set_intr_alt_stack(14, pf_tss); 400 handle_exception(11, np_isr); 401 set_idt_sel(33, NP_SEL); 402 /* install read only pte */ 403 install_pte(phys_to_virt(read_cr3()), 1, stack_va, 404 stack_phys | PT_PRESENT_MASK, 0); 405 invlpg(stack_va); 406 flush_idt_page(); 407 flush_phys_addr(stack_phys); 408 switch_stack(stack_va + 4095); 409 asm volatile ("int $33"); 410 restore_stack(); 411 printf("After int33\n"); 412 report(test_count == 2, "NP PF exceptions"); 413 414 pt = alloc_page(); 415 cr3 = (void*)read_cr3(); 416 /* use shadowed stack during interrupt delivery */ 417 for (i = 0; i < 4096/sizeof(ulong); i++) { 418 if (!cr3[i]) { 419 cr3[i] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK; 420 pt[0] = virt_to_phys(pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK; 421 #ifndef __x86_64__ 422 ((ulong*)(i<<22))[1] = 0; 423 #else 424 ((ulong*)(i<<39))[1] = 0; 425 #endif 426 write_cr3(virt_to_phys(cr3)); 427 break; 428 } 429 } 430 test_count = 0; 431 printf("Try int 33 with shadowed stack\n"); 432 switch_stack(((char*)pt) + 4095); 433 asm volatile("int $33"); 434 restore_stack(); 435 printf("After int 33 with shadowed stack\n"); 436 report(test_count == 1, "int 33 with shadowed stack"); 437 438 return report_summary(); 439 } 440