/*
 * PPC64 processor support
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include "spapr.h"
#include "spapr_pci.h"
#include "xics.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <assert.h>

static int debug_fd;

void kvm_cpu__set_debug_fd(int fd)
{
	debug_fd = fd;
}

int kvm_cpu__get_debug_fd(void)
{
	return debug_fd;
}

static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
	struct kvm_cpu *vcpu;

	vcpu = calloc(1, sizeof *vcpu);
	if (!vcpu)
		return NULL;

	vcpu->kvm = kvm;

	return vcpu;
}

void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	free(vcpu);
}

struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_cpu *vcpu;
	int mmap_size;
	struct kvm_enable_cap papr_cap = { .cap = KVM_CAP_PPC_PAPR };

	vcpu = kvm_cpu__new(kvm);
	if (!vcpu)
		return NULL;

	vcpu->cpu_id = cpu_id;

	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	if (ioctl(vcpu->vcpu_fd, KVM_ENABLE_CAP, &papr_cap) < 0)
		die("unable to enable PAPR capability");
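
	/*
	 * Note: KVM_CAP_PPC_PAPR marks this vcpu as running a PAPR (sPAPR)
	 * guest, so hypercalls that the kernel does not handle itself are
	 * returned to userspace as KVM_EXIT_PAPR_HCALL exits (see
	 * kvm_cpu__handle_exit() below).
	 */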

	/*
	 * We start all CPUs, directing non-primary threads into the kernel's
	 * secondary start point.  When we come to support SLOF, we will start
	 * only one, and SLOF will make an RTAS call to ask us to start the
	 * others.  (FIXME: make more generic & interface with whichever
	 * firmware a platform may be using.)
	 */
	vcpu->is_running = true;

	/* Register with IRQ controller (FIXME, assumes XICS) */
	xics_cpu_register(vcpu);

	return vcpu;
}

static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
	/* Don't have to do anything, there's no expected FPU state. */
}

static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
	/*
	 * FIXME: This assumes a PPC64 Linux guest.  It doesn't use the
	 * OpenFirmware entry method, but instead the "embedded" entry, which
	 * passes the FDT address directly.
	 */
	struct kvm_regs *r = &vcpu->regs;

	if (vcpu->cpu_id == 0) {
		r->pc = KERNEL_START_ADDR;
		r->gpr[3] = vcpu->kvm->arch.fdt_gra;
		r->gpr[5] = 0;
	} else {
		r->pc = KERNEL_SECONDARY_START_ADDR;
		r->gpr[3] = vcpu->cpu_id;
	}
	r->msr = 0x8000000000001000UL; /* 64-bit, non-HV, ME */

	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
		die_perror("KVM_SET_REGS failed");
}

static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
	/*
	 * Some sregs setup to initialise SDR1/PVR/HIOR on PPC64 SPAPR
	 * platforms using PR KVM.  (Technically, this is all ignored on
	 * SPAPR HV KVM.)  Different setup is required for non-PV non-SPAPR
	 * platforms!  (FIXME.)
	 */
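	/*
	 * SDR1 gives the guest hash page table base and size, and PVR is the
	 * processor version presented to the guest; HIOR is zeroed so that
	 * exception vectors are taken at the architected low-memory addresses
	 * rather than at an offset.
	 */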
	struct kvm_sregs sregs;
	struct kvm_one_reg reg = {};
	u64 value;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	sregs.u.s.sdr1 = vcpu->kvm->arch.sdr1;
	sregs.pvr = vcpu->kvm->arch.pvr;

	if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
		die("KVM_SET_SREGS failed");

	reg.id = KVM_REG_PPC_HIOR;
	value = 0;
	reg.addr = (u64)&value;
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die("KVM_SET_ONE_REG failed");
}

/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
}

/* kvm_cpu__irq - set KVM's IRQ flag on this vcpu */
void kvm_cpu__irq(struct kvm_cpu *vcpu, int pin, int level)
{
	unsigned int virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

	/* FIXME: POWER-specific */
	if (pin != POWER7_EXT_IRQ)
		return;
	if (ioctl(vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
		pr_warning("Could not KVM_INTERRUPT.");
}

void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
}

bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
{
	bool ret = true;
	struct kvm_run *run = vcpu->kvm_run;

	switch (run->exit_reason) {
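	/*
	 * PAPR hypercalls that were not handled in-kernel arrive here:
	 * spapr_hypercall() dispatches on the hypercall number and its
	 * return value is handed back to the guest in papr_hcall.ret.
	 */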
	case KVM_EXIT_PAPR_HCALL:
		run->papr_hcall.ret = spapr_hypercall(vcpu, run->papr_hcall.nr,
						      (target_ulong*)run->papr_hcall.args);
		break;
	default:
		ret = false;
	}
	return ret;
}

bool kvm_cpu__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
{
	/*
	 * FIXME: This function will need to be split in order to support
	 * various PowerPC platforms/PHB types, etc.  It currently assumes an
	 * SPAPR PPC64 guest.
	 */
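	/*
	 * Accesses inside the sPAPR PCI window are claimed by the emulated
	 * PCI host bridge; anything outside it is unhandled and only warned
	 * about.
	 */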
	bool ret = false;

	if ((phys_addr >= SPAPR_PCI_WIN_START) &&
	    (phys_addr < SPAPR_PCI_WIN_END)) {
		ret = spapr_phb_mmio(kvm, phys_addr, data, len, is_write);
	} else {
		pr_warning("MMIO %s unknown address %llx (size %d)!\n",
			   is_write ? "write to" : "read from",
			   phys_addr, len);
	}
	return ret;
}

#define CONDSTR_BIT(m, b) (((m) & MSR_##b) ? #b" " : "")

void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	int r;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
		die("KVM_GET_REGS failed");
	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	dprintf(debug_fd, "\n Registers:\n");
	dprintf(debug_fd, " NIP: %016llx MSR: %016llx "
		"( %s%s%s%s%s%s%s%s%s%s%s%s)\n",
		regs.pc, regs.msr,
		CONDSTR_BIT(regs.msr, SF),
		CONDSTR_BIT(regs.msr, HV), /* ! */
		CONDSTR_BIT(regs.msr, VEC),
		CONDSTR_BIT(regs.msr, VSX),
		CONDSTR_BIT(regs.msr, EE),
		CONDSTR_BIT(regs.msr, PR),
		CONDSTR_BIT(regs.msr, FP),
		CONDSTR_BIT(regs.msr, ME),
		CONDSTR_BIT(regs.msr, IR),
		CONDSTR_BIT(regs.msr, DR),
		CONDSTR_BIT(regs.msr, RI),
		CONDSTR_BIT(regs.msr, LE));
	dprintf(debug_fd, " CTR: %016llx LR: %016llx CR: %08llx\n",
		regs.ctr, regs.lr, regs.cr);
	dprintf(debug_fd, " SRR0: %016llx SRR1: %016llx XER: %016llx\n",
		regs.srr0, regs.srr1, regs.xer);
	dprintf(debug_fd, " SPRG0: %016llx SPRG1: %016llx\n",
		regs.sprg0, regs.sprg1);
	dprintf(debug_fd, " SPRG2: %016llx SPRG3: %016llx\n",
		regs.sprg2, regs.sprg3);
	dprintf(debug_fd, " SPRG4: %016llx SPRG5: %016llx\n",
		regs.sprg4, regs.sprg5);
	dprintf(debug_fd, " SPRG6: %016llx SPRG7: %016llx\n",
		regs.sprg6, regs.sprg7);
	dprintf(debug_fd, " GPRs:\n ");
	for (r = 0; r < 32; r++) {
		dprintf(debug_fd, "%016llx ", regs.gpr[r]);
		if ((r & 3) == 3)
			dprintf(debug_fd, "\n ");
	}
	dprintf(debug_fd, "\n");

	/* FIXME: Assumes SLB-based (book3s) guest */
	for (r = 0; r < 32; r++) {
		dprintf(debug_fd, " SLB%02d %016llx %016llx\n", r,
			sregs.u.s.ppc64.slb[r].slbe,
			sregs.u.s.ppc64.slb[r].slbv);
	}
	dprintf(debug_fd, "----------\n");
}

void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
		die("KVM_GET_REGS failed");

	/* FIXME: Dump/disassemble some code...! */

	dprintf(debug_fd, "\n Stack:\n");
	dprintf(debug_fd, " ------\n");
	/* Only works in real mode: */
	kvm__dump_mem(vcpu->kvm, vcpu->regs.gpr[1], 32);
}

void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
	/* Does nothing yet */
}