163e158a0SMatt Evans /*
263e158a0SMatt Evans * PPC64 processor support
363e158a0SMatt Evans *
463e158a0SMatt Evans * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
563e158a0SMatt Evans *
663e158a0SMatt Evans * This program is free software; you can redistribute it and/or modify it
763e158a0SMatt Evans * under the terms of the GNU General Public License version 2 as published
863e158a0SMatt Evans * by the Free Software Foundation.
963e158a0SMatt Evans */
1063e158a0SMatt Evans
1163e158a0SMatt Evans #include "kvm/kvm-cpu.h"
1263e158a0SMatt Evans
1363e158a0SMatt Evans #include "kvm/symbol.h"
1463e158a0SMatt Evans #include "kvm/util.h"
1563e158a0SMatt Evans #include "kvm/kvm.h"
1663e158a0SMatt Evans
17be76823fSMatt Evans #include "spapr.h"
185e8d833bSMatt Evans #include "spapr_pci.h"
19f17e5a37SMatt Evans #include "xics.h"
20be76823fSMatt Evans
2163e158a0SMatt Evans #include <sys/ioctl.h>
2263e158a0SMatt Evans #include <sys/mman.h>
2363e158a0SMatt Evans #include <signal.h>
2463e158a0SMatt Evans #include <stdlib.h>
2563e158a0SMatt Evans #include <string.h>
2663e158a0SMatt Evans #include <errno.h>
2763e158a0SMatt Evans #include <stdio.h>
285e8d833bSMatt Evans #include <assert.h>
2963e158a0SMatt Evans
3063e158a0SMatt Evans static int debug_fd;
3163e158a0SMatt Evans
kvm_cpu__set_debug_fd(int fd)3263e158a0SMatt Evans void kvm_cpu__set_debug_fd(int fd)
3363e158a0SMatt Evans {
3463e158a0SMatt Evans debug_fd = fd;
3563e158a0SMatt Evans }
3663e158a0SMatt Evans
kvm_cpu__get_debug_fd(void)3763e158a0SMatt Evans int kvm_cpu__get_debug_fd(void)
3863e158a0SMatt Evans {
3963e158a0SMatt Evans return debug_fd;
4063e158a0SMatt Evans }
4163e158a0SMatt Evans
kvm_cpu__new(struct kvm * kvm)4263e158a0SMatt Evans static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
4363e158a0SMatt Evans {
4463e158a0SMatt Evans struct kvm_cpu *vcpu;
4563e158a0SMatt Evans
4663e158a0SMatt Evans vcpu = calloc(1, sizeof *vcpu);
4763e158a0SMatt Evans if (!vcpu)
4863e158a0SMatt Evans return NULL;
4963e158a0SMatt Evans
5063e158a0SMatt Evans vcpu->kvm = kvm;
5163e158a0SMatt Evans
5263e158a0SMatt Evans return vcpu;
5363e158a0SMatt Evans }
5463e158a0SMatt Evans
/* Release a vcpu allocated by kvm_cpu__new(). Safe to call with NULL. */
void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	free(vcpu);
}
5963e158a0SMatt Evans
kvm_cpu__arch_init(struct kvm * kvm,unsigned long cpu_id)60df4239fbSSasha Levin struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
6163e158a0SMatt Evans {
6263e158a0SMatt Evans struct kvm_cpu *vcpu;
6363e158a0SMatt Evans int mmap_size;
6463e158a0SMatt Evans struct kvm_enable_cap papr_cap = { .cap = KVM_CAP_PPC_PAPR };
6563e158a0SMatt Evans
6663e158a0SMatt Evans vcpu = kvm_cpu__new(kvm);
6763e158a0SMatt Evans if (!vcpu)
6863e158a0SMatt Evans return NULL;
6963e158a0SMatt Evans
7063e158a0SMatt Evans vcpu->cpu_id = cpu_id;
7163e158a0SMatt Evans
7263e158a0SMatt Evans vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
7363e158a0SMatt Evans if (vcpu->vcpu_fd < 0)
7463e158a0SMatt Evans die_perror("KVM_CREATE_VCPU ioctl");
7563e158a0SMatt Evans
7663e158a0SMatt Evans mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
7763e158a0SMatt Evans if (mmap_size < 0)
7863e158a0SMatt Evans die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
7963e158a0SMatt Evans
8063e158a0SMatt Evans vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
8163e158a0SMatt Evans if (vcpu->kvm_run == MAP_FAILED)
8263e158a0SMatt Evans die("unable to mmap vcpu fd");
8363e158a0SMatt Evans
84df129a0aSMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_ENABLE_CAP, &papr_cap) < 0)
85df129a0aSMatt Evans die("unable to enable PAPR capability");
8663e158a0SMatt Evans
8763e158a0SMatt Evans /*
8863e158a0SMatt Evans * We start all CPUs, directing non-primary threads into the kernel's
8963e158a0SMatt Evans * secondary start point. When we come to support SLOF, we will start
9063e158a0SMatt Evans * only one and SLOF will RTAS call us to ask for others to be
9163e158a0SMatt Evans * started. (FIXME: make more generic & interface with whichever
9263e158a0SMatt Evans * firmware a platform may be using.)
9363e158a0SMatt Evans */
9463e158a0SMatt Evans vcpu->is_running = true;
9563e158a0SMatt Evans
9663e158a0SMatt Evans return vcpu;
9763e158a0SMatt Evans }
9863e158a0SMatt Evans
/* Part of the reset sequence; intentionally a no-op on this platform. */
static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
	/* Don't have to do anything, there's no expected FPU state. */
}
10363e158a0SMatt Evans
kvm_cpu__setup_regs(struct kvm_cpu * vcpu)10463e158a0SMatt Evans static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
10563e158a0SMatt Evans {
10663e158a0SMatt Evans /*
10763e158a0SMatt Evans * FIXME: This assumes PPC64 and Linux guest. It doesn't use the
10863e158a0SMatt Evans * OpenFirmware entry method, but instead the "embedded" entry which
10963e158a0SMatt Evans * passes the FDT address directly.
11063e158a0SMatt Evans */
11163e158a0SMatt Evans struct kvm_regs *r = &vcpu->regs;
11263e158a0SMatt Evans
11363e158a0SMatt Evans if (vcpu->cpu_id == 0) {
11463e158a0SMatt Evans r->pc = KERNEL_START_ADDR;
11542ac24f9SSasha Levin r->gpr[3] = vcpu->kvm->arch.fdt_gra;
11663e158a0SMatt Evans r->gpr[5] = 0;
11763e158a0SMatt Evans } else {
11863e158a0SMatt Evans r->pc = KERNEL_SECONDARY_START_ADDR;
11963e158a0SMatt Evans r->gpr[3] = vcpu->cpu_id;
12063e158a0SMatt Evans }
12163e158a0SMatt Evans r->msr = 0x8000000000001000UL; /* 64bit, non-HV, ME */
12263e158a0SMatt Evans
12363e158a0SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
12463e158a0SMatt Evans die_perror("KVM_SET_REGS failed");
12563e158a0SMatt Evans }
12663e158a0SMatt Evans
kvm_cpu__setup_sregs(struct kvm_cpu * vcpu)12763e158a0SMatt Evans static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
12863e158a0SMatt Evans {
12963e158a0SMatt Evans /*
130df129a0aSMatt Evans * Some sregs setup to initialise SDR1/PVR/HIOR on PPC64 SPAPR
131df129a0aSMatt Evans * platforms using PR KVM. (Technically, this is all ignored on
132df129a0aSMatt Evans * SPAPR HV KVM.) Different setup is required for non-PV non-SPAPR
133df129a0aSMatt Evans * platforms! (FIXME.)
13463e158a0SMatt Evans */
135df129a0aSMatt Evans struct kvm_sregs sregs;
136df129a0aSMatt Evans struct kvm_one_reg reg = {};
13754cb8a31SMichael Ellerman u64 value;
138df129a0aSMatt Evans
139df129a0aSMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
140df129a0aSMatt Evans die("KVM_GET_SREGS failed");
141df129a0aSMatt Evans
14242ac24f9SSasha Levin sregs.u.s.sdr1 = vcpu->kvm->arch.sdr1;
14342ac24f9SSasha Levin sregs.pvr = vcpu->kvm->arch.pvr;
144df129a0aSMatt Evans
145df129a0aSMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
146df129a0aSMatt Evans die("KVM_SET_SREGS failed");
147df129a0aSMatt Evans
14854cb8a31SMichael Ellerman reg.id = KVM_REG_PPC_HIOR;
14954cb8a31SMichael Ellerman value = 0;
150*e6655b75SMichael Ellerman reg.addr = (u64)(unsigned long)&value;
151df129a0aSMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, ®) < 0)
152df129a0aSMatt Evans die("KVM_SET_ONE_REG failed");
15363e158a0SMatt Evans }
15463e158a0SMatt Evans
/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 *
 * Programs boot-time GPRs/PC/MSR, special registers, then FPU state,
 * in that order. Each step dies on ioctl failure.
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
}
16463e158a0SMatt Evans
16563e158a0SMatt Evans /* kvm_cpu__irq - set KVM's IRQ flag on this vcpu */
kvm_cpu__irq(struct kvm_cpu * vcpu,int pin,int level)16663e158a0SMatt Evans void kvm_cpu__irq(struct kvm_cpu *vcpu, int pin, int level)
16763e158a0SMatt Evans {
168f17e5a37SMatt Evans unsigned int virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
169f17e5a37SMatt Evans
170f17e5a37SMatt Evans /* FIXME: POWER-specific */
171f17e5a37SMatt Evans if (pin != POWER7_EXT_IRQ)
172f17e5a37SMatt Evans return;
173f17e5a37SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
174f17e5a37SMatt Evans pr_warning("Could not KVM_INTERRUPT.");
17563e158a0SMatt Evans }
17663e158a0SMatt Evans
/* NMI injection is not implemented for PPC; intentionally a no-op. */
void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
}
18063e158a0SMatt Evans
kvm_cpu__handle_exit(struct kvm_cpu * vcpu)18163e158a0SMatt Evans bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
18263e158a0SMatt Evans {
18363e158a0SMatt Evans bool ret = true;
18463e158a0SMatt Evans struct kvm_run *run = vcpu->kvm_run;
18563e158a0SMatt Evans switch(run->exit_reason) {
186be76823fSMatt Evans case KVM_EXIT_PAPR_HCALL:
187be76823fSMatt Evans run->papr_hcall.ret = spapr_hypercall(vcpu, run->papr_hcall.nr,
188be76823fSMatt Evans (target_ulong*)run->papr_hcall.args);
189be76823fSMatt Evans break;
19063e158a0SMatt Evans default:
19163e158a0SMatt Evans ret = false;
19263e158a0SMatt Evans }
19363e158a0SMatt Evans return ret;
19463e158a0SMatt Evans }
19563e158a0SMatt Evans
kvm_cpu__emulate_mmio(struct kvm_cpu * vcpu,u64 phys_addr,u8 * data,u32 len,u8 is_write)1969b735910SMarc Zyngier bool kvm_cpu__emulate_mmio(struct kvm_cpu *vcpu, u64 phys_addr, u8 *data, u32 len, u8 is_write)
1975e8d833bSMatt Evans {
1985e8d833bSMatt Evans /*
1995e8d833bSMatt Evans * FIXME: This function will need to be split in order to support
2005e8d833bSMatt Evans * various PowerPC platforms/PHB types, etc. It currently assumes SPAPR
2015e8d833bSMatt Evans * PPC64 guest.
2025e8d833bSMatt Evans */
2035e8d833bSMatt Evans bool ret = false;
2045e8d833bSMatt Evans
2055e8d833bSMatt Evans if ((phys_addr >= SPAPR_PCI_WIN_START) &&
2065e8d833bSMatt Evans (phys_addr < SPAPR_PCI_WIN_END)) {
2079b735910SMarc Zyngier ret = spapr_phb_mmio(vcpu, phys_addr, data, len, is_write);
2085e8d833bSMatt Evans } else {
2095e8d833bSMatt Evans pr_warning("MMIO %s unknown address %llx (size %d)!\n",
2105e8d833bSMatt Evans is_write ? "write to" : "read from",
2115e8d833bSMatt Evans phys_addr, len);
2125e8d833bSMatt Evans }
2135e8d833bSMatt Evans return ret;
2145e8d833bSMatt Evans }
2155e8d833bSMatt Evans
21663e158a0SMatt Evans #define CONDSTR_BIT(m, b) (((m) & MSR_##b) ? #b" " : "")
21763e158a0SMatt Evans
kvm_cpu__show_registers(struct kvm_cpu * vcpu)21863e158a0SMatt Evans void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
21963e158a0SMatt Evans {
22063e158a0SMatt Evans struct kvm_regs regs;
22163e158a0SMatt Evans struct kvm_sregs sregs;
22263e158a0SMatt Evans int r;
22363e158a0SMatt Evans
22463e158a0SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, ®s) < 0)
22563e158a0SMatt Evans die("KVM_GET_REGS failed");
22663e158a0SMatt Evans if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
22763e158a0SMatt Evans die("KVM_GET_SREGS failed");
22863e158a0SMatt Evans
22963e158a0SMatt Evans dprintf(debug_fd, "\n Registers:\n");
23063e158a0SMatt Evans dprintf(debug_fd, " NIP: %016llx MSR: %016llx "
23163e158a0SMatt Evans "( %s%s%s%s%s%s%s%s%s%s%s%s)\n",
23263e158a0SMatt Evans regs.pc, regs.msr,
23363e158a0SMatt Evans CONDSTR_BIT(regs.msr, SF),
23463e158a0SMatt Evans CONDSTR_BIT(regs.msr, HV), /* ! */
23563e158a0SMatt Evans CONDSTR_BIT(regs.msr, VEC),
23663e158a0SMatt Evans CONDSTR_BIT(regs.msr, VSX),
23763e158a0SMatt Evans CONDSTR_BIT(regs.msr, EE),
23863e158a0SMatt Evans CONDSTR_BIT(regs.msr, PR),
23963e158a0SMatt Evans CONDSTR_BIT(regs.msr, FP),
24063e158a0SMatt Evans CONDSTR_BIT(regs.msr, ME),
24163e158a0SMatt Evans CONDSTR_BIT(regs.msr, IR),
24263e158a0SMatt Evans CONDSTR_BIT(regs.msr, DR),
24363e158a0SMatt Evans CONDSTR_BIT(regs.msr, RI),
24463e158a0SMatt Evans CONDSTR_BIT(regs.msr, LE));
24563e158a0SMatt Evans dprintf(debug_fd, " CTR: %016llx LR: %016llx CR: %08llx\n",
24663e158a0SMatt Evans regs.ctr, regs.lr, regs.cr);
24763e158a0SMatt Evans dprintf(debug_fd, " SRR0: %016llx SRR1: %016llx XER: %016llx\n",
24863e158a0SMatt Evans regs.srr0, regs.srr1, regs.xer);
24963e158a0SMatt Evans dprintf(debug_fd, " SPRG0: %016llx SPRG1: %016llx\n",
25063e158a0SMatt Evans regs.sprg0, regs.sprg1);
25163e158a0SMatt Evans dprintf(debug_fd, " SPRG2: %016llx SPRG3: %016llx\n",
25263e158a0SMatt Evans regs.sprg2, regs.sprg3);
25363e158a0SMatt Evans dprintf(debug_fd, " SPRG4: %016llx SPRG5: %016llx\n",
25463e158a0SMatt Evans regs.sprg4, regs.sprg5);
25563e158a0SMatt Evans dprintf(debug_fd, " SPRG6: %016llx SPRG7: %016llx\n",
25663e158a0SMatt Evans regs.sprg6, regs.sprg7);
25763e158a0SMatt Evans dprintf(debug_fd, " GPRs:\n ");
25863e158a0SMatt Evans for (r = 0; r < 32; r++) {
25963e158a0SMatt Evans dprintf(debug_fd, "%016llx ", regs.gpr[r]);
26063e158a0SMatt Evans if ((r & 3) == 3)
26163e158a0SMatt Evans dprintf(debug_fd, "\n ");
26263e158a0SMatt Evans }
26363e158a0SMatt Evans dprintf(debug_fd, "\n");
26463e158a0SMatt Evans
26563e158a0SMatt Evans /* FIXME: Assumes SLB-based (book3s) guest */
26663e158a0SMatt Evans for (r = 0; r < 32; r++) {
26763e158a0SMatt Evans dprintf(debug_fd, " SLB%02d %016llx %016llx\n", r,
26863e158a0SMatt Evans sregs.u.s.ppc64.slb[r].slbe,
26963e158a0SMatt Evans sregs.u.s.ppc64.slb[r].slbv);
27063e158a0SMatt Evans }
27163e158a0SMatt Evans dprintf(debug_fd, "----------\n");
27263e158a0SMatt Evans }
27363e158a0SMatt Evans
/*
 * Dump the guest stack (32 bytes from r1) to debug_fd. Disassembly of
 * code around the PC is not implemented yet. Fatal if KVM_GET_REGS fails.
 */
void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
		die("KVM_GET_REGS failed");

	/* FIXME: Dump/disassemble some code...! */

	dprintf(debug_fd, "\n Stack:\n");
	dprintf(debug_fd, " ------\n");
	/* Only works in real mode: */
	/* r1 is the PPC ABI stack pointer. */
	kvm__dump_mem(vcpu->kvm, vcpu->regs.gpr[1], 32, debug_fd);
}
28663e158a0SMatt Evans
/* Page-table dumping is not implemented for PPC yet; intentional no-op. */
void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
	/* Does nothing yet */
}
291