#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <asm/msr-index.h>
#include <asm/apicdef.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

static int debug_fd;

void kvm_cpu__set_debug_fd(int fd)
{
	debug_fd = fd;
}

int kvm_cpu__get_debug_fd(void)
{
	return debug_fd;
}

static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
{
	return vcpu->sregs.cr0 & 0x01;
}

static inline u64 ip_to_flat(struct kvm_cpu *vcpu, u64 ip)
{
	u64 cs;

	/*
	 * NOTE! We should take code segment base address into account here.
	 * Luckily it's usually zero because Linux uses flat memory model.
	 */
	if (is_in_protected_mode(vcpu))
		return ip;

	cs = vcpu->sregs.cs.selector;

	return ip + (cs << 4);
}

static inline u32 selector_to_base(u16 selector)
{
	/*
	 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
	 */
	return (u32)selector * 16;
}

static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
	struct kvm_cpu *vcpu;

	vcpu = calloc(1, sizeof *vcpu);
	if (!vcpu)
		return NULL;

	vcpu->kvm = kvm;

	return vcpu;
}

void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	if (vcpu->msrs)
		free(vcpu->msrs);

	free(vcpu);
}

/*
 * Route LINT0 as ExtINT so that legacy i8259 interrupts reach the vcpu, and
 * LINT1 as NMI, mirroring the LVT setup firmware normally performs.
 */
static int kvm_cpu__set_lint(struct kvm_cpu *vcpu)
{
	struct kvm_lapic_state klapic;
	struct local_apic *lapic = (void *)&klapic;
	u32 lvt;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_LAPIC, &klapic))
		return -1;

	lvt = *(u32 *)&lapic->lvt_lint0;
	lvt = SET_APIC_DELIVERY_MODE(lvt, APIC_MODE_EXTINT);
	*(u32 *)&lapic->lvt_lint0 = lvt;

	lvt = *(u32 *)&lapic->lvt_lint1;
	lvt = SET_APIC_DELIVERY_MODE(lvt, APIC_MODE_NMI);
	*(u32 *)&lapic->lvt_lint1 = lvt;

	return ioctl(vcpu->vcpu_fd, KVM_SET_LAPIC, &klapic);
}

struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_cpu *vcpu;
	int mmap_size;
	int coalesced_offset;

	vcpu = kvm_cpu__new(kvm);
	if (!vcpu)
		return NULL;

	vcpu->cpu_id = cpu_id;

	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
	if (coalesced_offset)
		vcpu->ring = (void *)vcpu->kvm_run + (coalesced_offset * PAGE_SIZE);

	if (kvm_cpu__set_lint(vcpu))
		die_perror("KVM_SET_LAPIC failed");

	vcpu->is_running = true;

	return vcpu;
}

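/*
 * Allocate a struct kvm_msrs with room for 'nmsrs' entries in its trailing
 * flexible array; the caller fills 'entries' and sets 'nmsrs' before use.
 */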
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	struct kvm_msrs *vcpu = calloc(1, sizeof(*vcpu) + (sizeof(struct kvm_msr_entry) * nmsrs));

	if (!vcpu)
		die("out of memory");

	return vcpu;
}

#define KVM_MSR_ENTRY(_index, _data) \
	(struct kvm_msr_entry) { .index = _index, .data = _data }

static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
{
	unsigned long ndx = 0;

	vcpu->msrs = kvm_msrs__new(100);

	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
#ifdef CONFIG_X86_64
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR, 0x0);
#endif
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC, 0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_MISC_ENABLE,
						MSR_IA32_MISC_ENABLE_FAST_STRING);

	vcpu->msrs->nmsrs = ndx;

	if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
		die_perror("KVM_SET_MSRS failed");
}

static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
	vcpu->fpu = (struct kvm_fpu) {
		.fcw	= 0x37f,
		.mxcsr	= 0x1f80,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
		die_perror("KVM_SET_FPU failed");
}

static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
	vcpu->regs = (struct kvm_regs) {
		/* We start the guest in 16-bit real mode */
		.rflags	= 0x0000000000000002ULL,

		.rip	= vcpu->kvm->boot_ip,
		.rsp	= vcpu->kvm->boot_sp,
		.rbp	= vcpu->kvm->boot_sp,
	};

	if (vcpu->regs.rip > USHRT_MAX)
		die("ip 0x%llx is too high for real mode", (u64) vcpu->regs.rip);

	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
		die_perror("KVM_SET_REGS failed");
}

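/*
 * Point every segment register at the boot selector; in real mode the segment
 * base must be 'selector * 16' (see selector_to_base() above).
 */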
static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
		die_perror("KVM_GET_SREGS failed");

	vcpu->sregs.cs.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.cs.base	= selector_to_base(vcpu->kvm->boot_selector);
	vcpu->sregs.ss.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.ss.base	= selector_to_base(vcpu->kvm->boot_selector);
	vcpu->sregs.ds.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.ds.base	= selector_to_base(vcpu->kvm->boot_selector);
	vcpu->sregs.es.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.es.base	= selector_to_base(vcpu->kvm->boot_selector);
	vcpu->sregs.fs.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.fs.base	= selector_to_base(vcpu->kvm->boot_selector);
	vcpu->sregs.gs.selector	= vcpu->kvm->boot_selector;
	vcpu->sregs.gs.base	= selector_to_base(vcpu->kvm->boot_selector);

	if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &vcpu->sregs) < 0)
		die_perror("KVM_SET_SREGS failed");
}

/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_cpuid(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
	kvm_cpu__setup_msrs(vcpu);
}

bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
{
	return false;
}

static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	dprintf(debug_fd, " %s %016llx %08hx\n",
		name, (u64) dtable->base, (u16) dtable->limit);
}

static void print_segment(const char *name, struct kvm_segment *seg)
{
	dprintf(debug_fd, " %s %04hx %016llx %08x %02hhx %x %x %x %x %x %x %x\n",
		name, (u16) seg->selector, (u64) seg->base, (u32) seg->limit,
		(u8) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}

void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
	unsigned long cr0, cr2, cr3;
	unsigned long cr4, cr8;
	unsigned long rax, rbx, rcx;
	unsigned long rdx, rsi, rdi;
	unsigned long rbp, r8, r9;
	unsigned long r10, r11, r12;
	unsigned long r13, r14, r15;
	unsigned long rip, rsp;
	struct kvm_sregs sregs;
	unsigned long rflags;
	struct kvm_regs regs;
	int i;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
		die("KVM_GET_REGS failed");

	rflags = regs.rflags;

	rip = regs.rip; rsp = regs.rsp;
	rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
	rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
	rbp = regs.rbp; r8 = regs.r8; r9 = regs.r9;
	r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
	r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;

	dprintf(debug_fd, "\n Registers:\n");
	dprintf(debug_fd, " ----------\n");
	dprintf(debug_fd, " rip: %016lx rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
	dprintf(debug_fd, " rax: %016lx rbx: %016lx rcx: %016lx\n", rax, rbx, rcx);
	dprintf(debug_fd, " rdx: %016lx rsi: %016lx rdi: %016lx\n", rdx, rsi, rdi);
	dprintf(debug_fd, " rbp: %016lx r8: %016lx r9: %016lx\n", rbp, r8, r9);
	dprintf(debug_fd, " r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
	dprintf(debug_fd, " r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
	cr4 = sregs.cr4; cr8 = sregs.cr8;

	dprintf(debug_fd, " cr0: %016lx cr2: %016lx cr3: %016lx\n", cr0, cr2, cr3);
	dprintf(debug_fd, " cr4: %016lx cr8: %016lx\n", cr4, cr8);
	dprintf(debug_fd, "\n Segment registers:\n");
	dprintf(debug_fd, " ------------------\n");
	dprintf(debug_fd, " register selector base limit type p dpl db s l g avl\n");
	print_segment("cs ", &sregs.cs);
	print_segment("ss ", &sregs.ss);
	print_segment("ds ", &sregs.ds);
	print_segment("es ", &sregs.es);
	print_segment("fs ", &sregs.fs);
	print_segment("gs ", &sregs.gs);
	print_segment("tr ", &sregs.tr);
	print_segment("ldt", &sregs.ldt);
	print_dtable("gdt", &sregs.gdt);
	print_dtable("idt", &sregs.idt);

	dprintf(debug_fd, "\n APIC:\n");
	dprintf(debug_fd, " -----\n");
	dprintf(debug_fd, " efer: %016llx apic base: %016llx nmi: %s\n",
		(u64) sregs.efer, (u64) sregs.apic_base,
		(vcpu->kvm->nmi_disabled ? "disabled" : "enabled"));

"disabled" : "enabled")); 320af7b0868SMatt Evans 321af7b0868SMatt Evans dprintf(debug_fd, "\n Interrupt bitmap:\n"); 322af7b0868SMatt Evans dprintf(debug_fd, " -----------------\n"); 323af7b0868SMatt Evans for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) 324af7b0868SMatt Evans dprintf(debug_fd, " %016llx", (u64) sregs.interrupt_bitmap[i]); 325af7b0868SMatt Evans dprintf(debug_fd, "\n"); 326af7b0868SMatt Evans } 327af7b0868SMatt Evans 328af7b0868SMatt Evans #define MAX_SYM_LEN 128 329af7b0868SMatt Evans 330af7b0868SMatt Evans void kvm_cpu__show_code(struct kvm_cpu *vcpu) 331af7b0868SMatt Evans { 332af7b0868SMatt Evans unsigned int code_bytes = 64; 333af7b0868SMatt Evans unsigned int code_prologue = code_bytes * 43 / 64; 334af7b0868SMatt Evans unsigned int code_len = code_bytes; 335af7b0868SMatt Evans char sym[MAX_SYM_LEN]; 336af7b0868SMatt Evans unsigned char c; 337af7b0868SMatt Evans unsigned int i; 338af7b0868SMatt Evans u8 *ip; 339af7b0868SMatt Evans 340af7b0868SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0) 341af7b0868SMatt Evans die("KVM_GET_REGS failed"); 342af7b0868SMatt Evans 343af7b0868SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0) 344af7b0868SMatt Evans die("KVM_GET_SREGS failed"); 345af7b0868SMatt Evans 346af7b0868SMatt Evans ip = guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip) - code_prologue); 347af7b0868SMatt Evans 348af7b0868SMatt Evans dprintf(debug_fd, "\n Code:\n"); 349af7b0868SMatt Evans dprintf(debug_fd, " -----\n"); 350af7b0868SMatt Evans 351af7b0868SMatt Evans symbol__lookup(vcpu->kvm, vcpu->regs.rip, sym, MAX_SYM_LEN); 352af7b0868SMatt Evans 353af7b0868SMatt Evans dprintf(debug_fd, " rip: [<%016lx>] %s\n\n", (unsigned long) vcpu->regs.rip, sym); 354af7b0868SMatt Evans 355af7b0868SMatt Evans for (i = 0; i < code_len; i++, ip++) { 356af7b0868SMatt Evans if (!host_ptr_in_ram(vcpu->kvm, ip)) 357af7b0868SMatt Evans break; 358af7b0868SMatt Evans 359af7b0868SMatt Evans c = *ip; 360af7b0868SMatt Evans 361af7b0868SMatt Evans if (ip == guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip))) 362af7b0868SMatt Evans dprintf(debug_fd, " <%02x>", c); 363af7b0868SMatt Evans else 364af7b0868SMatt Evans dprintf(debug_fd, " %02x", c); 365af7b0868SMatt Evans } 366af7b0868SMatt Evans 367af7b0868SMatt Evans dprintf(debug_fd, "\n"); 368af7b0868SMatt Evans 369af7b0868SMatt Evans dprintf(debug_fd, "\n Stack:\n"); 370af7b0868SMatt Evans dprintf(debug_fd, " ------\n"); 371af7b0868SMatt Evans kvm__dump_mem(vcpu->kvm, vcpu->regs.rsp, 32); 372af7b0868SMatt Evans } 373af7b0868SMatt Evans 374af7b0868SMatt Evans void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu) 375af7b0868SMatt Evans { 376af7b0868SMatt Evans u64 *pte1; 377af7b0868SMatt Evans u64 *pte2; 378af7b0868SMatt Evans u64 *pte3; 379af7b0868SMatt Evans u64 *pte4; 380af7b0868SMatt Evans 381af7b0868SMatt Evans if (!is_in_protected_mode(vcpu)) 382af7b0868SMatt Evans return; 383af7b0868SMatt Evans 384af7b0868SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0) 385af7b0868SMatt Evans die("KVM_GET_SREGS failed"); 386af7b0868SMatt Evans 387af7b0868SMatt Evans pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3); 388af7b0868SMatt Evans if (!host_ptr_in_ram(vcpu->kvm, pte4)) 389af7b0868SMatt Evans return; 390af7b0868SMatt Evans 391af7b0868SMatt Evans pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff)); 392af7b0868SMatt Evans if (!host_ptr_in_ram(vcpu->kvm, pte3)) 393af7b0868SMatt Evans return; 394af7b0868SMatt Evans 395af7b0868SMatt Evans pte2 = 
	pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
	if (!host_ptr_in_ram(vcpu->kvm, pte2))
		return;

	pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
	if (!host_ptr_in_ram(vcpu->kvm, pte1))
		return;

	dprintf(debug_fd, "Page Tables:\n");
	if (*pte2 & (1 << 7))
		dprintf(debug_fd, " pte4: %016llx pte3: %016llx"
			" pte2: %016llx\n",
			*pte4, *pte3, *pte2);
	else
		dprintf(debug_fd, " pte4: %016llx pte3: %016llx pte2: %016"
			"llx pte1: %016llx\n",
			*pte4, *pte3, *pte2, *pte1);
}

/*
 * Inject an NMI into the vcpu, but only if the guest has LINT1 unmasked and
 * programmed for NMI delivery.
 */
void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
	struct kvm_lapic_state klapic;
	struct local_apic *lapic = (void *)&klapic;

	if (ioctl(cpu->vcpu_fd, KVM_GET_LAPIC, &klapic) != 0)
		return;

	if (lapic->lvt_lint1.mask)
		return;

	if (lapic->lvt_lint1.delivery_mode != APIC_MODE_NMI)
		return;

	ioctl(cpu->vcpu_fd, KVM_NMI);
}