/*
 * PPC64 processor support
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include "spapr.h"
#include "spapr_pci.h"
#include "xics.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <assert.h>

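/*
 * File descriptor used by the register/code/stack dump helpers below;
 * set via kvm_cpu__set_debug_fd().
 */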
static int debug_fd;

void kvm_cpu__set_debug_fd(int fd)
{
	debug_fd = fd;
}

int kvm_cpu__get_debug_fd(void)
{
	return debug_fd;
}

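/* Allocate and zero a vcpu structure bound to @kvm; returns NULL on failure. */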
static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
	struct kvm_cpu *vcpu;

	vcpu = calloc(1, sizeof *vcpu);
	if (!vcpu)
		return NULL;

	vcpu->kvm = kvm;

	return vcpu;
}

void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	free(vcpu);
}

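/*
 * Create one vcpu: issue KVM_CREATE_VCPU, map the shared kvm_run area and
 * enable the PAPR capability so that guest hypercalls can reach us as
 * KVM_EXIT_PAPR_HCALL exits (handled in kvm_cpu__handle_exit() below).
 */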
struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_cpu *vcpu;
	int mmap_size;
	struct kvm_enable_cap papr_cap = { .cap = KVM_CAP_PPC_PAPR };

	vcpu = kvm_cpu__new(kvm);
	if (!vcpu)
		return NULL;

	vcpu->cpu_id = cpu_id;

	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	if (ioctl(vcpu->vcpu_fd, KVM_ENABLE_CAP, &papr_cap) < 0)
		die("unable to enable PAPR capability");

	/*
	 * We start all CPUs, directing non-primary threads into the kernel's
	 * secondary start point. When we come to support SLOF, we will start
	 * only one and SLOF will RTAS call us to ask for others to be
	 * started. (FIXME: make more generic & interface with whichever
	 * firmware a platform may be using.)
	 */
	vcpu->is_running = true;

	return vcpu;
}

static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
	/* Don't have to do anything, there's no expected FPU state. */
}

static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
	/*
	 * FIXME: This assumes PPC64 and Linux guest. It doesn't use the
	 * OpenFirmware entry method, but instead the "embedded" entry which
	 * passes the FDT address directly.
	 */
	struct kvm_regs *r = &vcpu->regs;

	if (vcpu->cpu_id == 0) {
		r->pc = KERNEL_START_ADDR;
		r->gpr[3] = vcpu->kvm->arch.fdt_gra;
		r->gpr[5] = 0;
	} else {
		r->pc = KERNEL_SECONDARY_START_ADDR;
		r->gpr[3] = vcpu->cpu_id;
	}
	r->msr = 0x8000000000001000UL; /* 64bit, non-HV, ME */

	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
		die_perror("KVM_SET_REGS failed");
}

static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
	/*
	 * Some sregs setup to initialise SDR1/PVR/HIOR on PPC64 SPAPR
	 * platforms using PR KVM. (Technically, this is all ignored on
	 * SPAPR HV KVM.) Different setup is required for non-PV non-SPAPR
	 * platforms! (FIXME.)
	 */
	struct kvm_sregs sregs;
	struct kvm_one_reg reg = {};
	u64 value;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	sregs.u.s.sdr1 = vcpu->kvm->arch.sdr1;
	sregs.pvr = vcpu->kvm->arch.pvr;

	if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
		die("KVM_SET_SREGS failed");

	reg.id = KVM_REG_PPC_HIOR;
	value = 0;
	reg.addr = (u64)(unsigned long)&value;
	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		die("KVM_SET_ONE_REG failed");
}

/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
}

/* kvm_cpu__irq - set KVM's IRQ flag on this vcpu */
void kvm_cpu__irq(struct kvm_cpu *vcpu, int pin, int level)
{
	unsigned int virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

	/* FIXME: POWER-specific */
	if (pin != POWER7_EXT_IRQ)
		return;
	if (ioctl(vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
		pr_warning("Could not KVM_INTERRUPT.");
}

void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
}

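/*
 * Handle PowerPC-specific exit reasons. Exits we don't recognise return
 * false so the caller can deal with them.
 */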
bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
{
	bool ret = true;
	struct kvm_run *run = vcpu->kvm_run;

	switch (run->exit_reason) {
	case KVM_EXIT_PAPR_HCALL:
		run->papr_hcall.ret = spapr_hypercall(vcpu, run->papr_hcall.nr,
						      (target_ulong *)run->papr_hcall.args);
		break;
	default:
		ret = false;
	}

	return ret;
}

bool kvm_cpu__emulate_mmio(struct kvm_cpu *vcpu, u64 phys_addr, u8 *data, u32 len, u8 is_write)
{
	/*
	 * FIXME: This function will need to be split in order to support
	 * various PowerPC platforms/PHB types, etc. It currently assumes a
	 * SPAPR PPC64 guest.
	 */
	bool ret = false;

	if ((phys_addr >= SPAPR_PCI_WIN_START) &&
	    (phys_addr < SPAPR_PCI_WIN_END)) {
		ret = spapr_phb_mmio(vcpu, phys_addr, data, len, is_write);
	} else {
		pr_warning("MMIO %s unknown address %llx (size %d)!\n",
			   is_write ? "write to" : "read from",
			   phys_addr, len);
	}

	return ret;
}

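/* Expand to the MSR bit's name plus a trailing space if it is set in m, or to "" otherwise. */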
#define CONDSTR_BIT(m, b) (((m) & MSR_##b) ? #b" " : "")

void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	int r;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
		die("KVM_GET_REGS failed");
	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	dprintf(debug_fd, "\n Registers:\n");
	dprintf(debug_fd, " NIP: %016llx MSR: %016llx "
		"( %s%s%s%s%s%s%s%s%s%s%s%s)\n",
		regs.pc, regs.msr,
		CONDSTR_BIT(regs.msr, SF),
		CONDSTR_BIT(regs.msr, HV), /* ! */
		CONDSTR_BIT(regs.msr, VEC),
		CONDSTR_BIT(regs.msr, VSX),
		CONDSTR_BIT(regs.msr, EE),
		CONDSTR_BIT(regs.msr, PR),
		CONDSTR_BIT(regs.msr, FP),
		CONDSTR_BIT(regs.msr, ME),
		CONDSTR_BIT(regs.msr, IR),
		CONDSTR_BIT(regs.msr, DR),
		CONDSTR_BIT(regs.msr, RI),
		CONDSTR_BIT(regs.msr, LE));
	dprintf(debug_fd, " CTR: %016llx LR: %016llx CR: %08llx\n",
		regs.ctr, regs.lr, regs.cr);
	dprintf(debug_fd, " SRR0: %016llx SRR1: %016llx XER: %016llx\n",
		regs.srr0, regs.srr1, regs.xer);
	dprintf(debug_fd, " SPRG0: %016llx SPRG1: %016llx\n",
		regs.sprg0, regs.sprg1);
	dprintf(debug_fd, " SPRG2: %016llx SPRG3: %016llx\n",
		regs.sprg2, regs.sprg3);
	dprintf(debug_fd, " SPRG4: %016llx SPRG5: %016llx\n",
		regs.sprg4, regs.sprg5);
	dprintf(debug_fd, " SPRG6: %016llx SPRG7: %016llx\n",
		regs.sprg6, regs.sprg7);
	dprintf(debug_fd, " GPRs:\n ");
	for (r = 0; r < 32; r++) {
		dprintf(debug_fd, "%016llx ", regs.gpr[r]);
		if ((r & 3) == 3)
			dprintf(debug_fd, "\n ");
	}
	dprintf(debug_fd, "\n");

	/* FIXME: Assumes SLB-based (book3s) guest */
	for (r = 0; r < 32; r++) {
		dprintf(debug_fd, " SLB%02d %016llx %016llx\n", r,
			sregs.u.s.ppc64.slb[r].slbe,
			sregs.u.s.ppc64.slb[r].slbv);
	}
	dprintf(debug_fd, "----------\n");
}

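/*
 * No disassembly yet (see FIXME below); just dump memory around the guest
 * stack pointer (r1).
 */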
void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
		die("KVM_GET_REGS failed");

	/* FIXME: Dump/disassemble some code...! */

	dprintf(debug_fd, "\n Stack:\n");
	dprintf(debug_fd, " ------\n");
	/* Only works in real mode: */
	kvm__dump_mem(vcpu->kvm, vcpu->regs.gpr[1], 32, debug_fd);
}

void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
	/* Does nothing yet */
}