xref: /kvmtool/powerpc/kvm-cpu.c (revision 63e158a0a0a05551e8e983826bb26a2c0a78ef4b)
1*63e158a0SMatt Evans /*
2*63e158a0SMatt Evans  * PPC64 processor support
3*63e158a0SMatt Evans  *
4*63e158a0SMatt Evans  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
5*63e158a0SMatt Evans  *
6*63e158a0SMatt Evans  * This program is free software; you can redistribute it and/or modify it
7*63e158a0SMatt Evans  * under the terms of the GNU General Public License version 2 as published
8*63e158a0SMatt Evans  * by the Free Software Foundation.
9*63e158a0SMatt Evans  */
10*63e158a0SMatt Evans 
11*63e158a0SMatt Evans #include "kvm/kvm-cpu.h"
12*63e158a0SMatt Evans 
13*63e158a0SMatt Evans #include "kvm/symbol.h"
14*63e158a0SMatt Evans #include "kvm/util.h"
15*63e158a0SMatt Evans #include "kvm/kvm.h"
16*63e158a0SMatt Evans 
17*63e158a0SMatt Evans #include <sys/ioctl.h>
18*63e158a0SMatt Evans #include <sys/mman.h>
19*63e158a0SMatt Evans #include <signal.h>
20*63e158a0SMatt Evans #include <stdlib.h>
21*63e158a0SMatt Evans #include <string.h>
22*63e158a0SMatt Evans #include <errno.h>
23*63e158a0SMatt Evans #include <stdio.h>
24*63e158a0SMatt Evans 
/* Destination file descriptor for the register/code debug dumps below. */
static int debug_fd;

void kvm_cpu__set_debug_fd(int fd)
{
	debug_fd = fd;
}

int kvm_cpu__get_debug_fd(void)
{
	return debug_fd;
}
36*63e158a0SMatt Evans 
37*63e158a0SMatt Evans static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
38*63e158a0SMatt Evans {
39*63e158a0SMatt Evans 	struct kvm_cpu *vcpu;
40*63e158a0SMatt Evans 
41*63e158a0SMatt Evans 	vcpu		= calloc(1, sizeof *vcpu);
42*63e158a0SMatt Evans 	if (!vcpu)
43*63e158a0SMatt Evans 		return NULL;
44*63e158a0SMatt Evans 
45*63e158a0SMatt Evans 	vcpu->kvm	= kvm;
46*63e158a0SMatt Evans 
47*63e158a0SMatt Evans 	return vcpu;
48*63e158a0SMatt Evans }
49*63e158a0SMatt Evans 
/* Release a vcpu allocated by kvm_cpu__new(); free(NULL) is a no-op. */
void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	free(vcpu);
}
54*63e158a0SMatt Evans 
55*63e158a0SMatt Evans struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
56*63e158a0SMatt Evans {
57*63e158a0SMatt Evans 	struct kvm_cpu *vcpu;
58*63e158a0SMatt Evans 	int mmap_size;
59*63e158a0SMatt Evans 	struct kvm_enable_cap papr_cap = { .cap = KVM_CAP_PPC_PAPR };
60*63e158a0SMatt Evans 
61*63e158a0SMatt Evans 	vcpu		= kvm_cpu__new(kvm);
62*63e158a0SMatt Evans 	if (!vcpu)
63*63e158a0SMatt Evans 		return NULL;
64*63e158a0SMatt Evans 
65*63e158a0SMatt Evans 	vcpu->cpu_id	= cpu_id;
66*63e158a0SMatt Evans 
67*63e158a0SMatt Evans 	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
68*63e158a0SMatt Evans 	if (vcpu->vcpu_fd < 0)
69*63e158a0SMatt Evans 		die_perror("KVM_CREATE_VCPU ioctl");
70*63e158a0SMatt Evans 
71*63e158a0SMatt Evans 	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
72*63e158a0SMatt Evans 	if (mmap_size < 0)
73*63e158a0SMatt Evans 		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
74*63e158a0SMatt Evans 
75*63e158a0SMatt Evans 	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
76*63e158a0SMatt Evans 	if (vcpu->kvm_run == MAP_FAILED)
77*63e158a0SMatt Evans 		die("unable to mmap vcpu fd");
78*63e158a0SMatt Evans 
79*63e158a0SMatt Evans 	ioctl(vcpu->vcpu_fd, KVM_ENABLE_CAP, &papr_cap);
80*63e158a0SMatt Evans 
81*63e158a0SMatt Evans 	/*
82*63e158a0SMatt Evans 	 * We start all CPUs, directing non-primary threads into the kernel's
83*63e158a0SMatt Evans 	 * secondary start point.  When we come to support SLOF, we will start
84*63e158a0SMatt Evans 	 * only one and SLOF will RTAS call us to ask for others to be
85*63e158a0SMatt Evans 	 * started.  (FIXME: make more generic & interface with whichever
86*63e158a0SMatt Evans 	 * firmware a platform may be using.)
87*63e158a0SMatt Evans 	 */
88*63e158a0SMatt Evans 	vcpu->is_running = true;
89*63e158a0SMatt Evans 
90*63e158a0SMatt Evans 	return vcpu;
91*63e158a0SMatt Evans }
92*63e158a0SMatt Evans 
/* No initial FPU state is expected by the guest, so nothing to set up. */
static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
}
97*63e158a0SMatt Evans 
98*63e158a0SMatt Evans static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
99*63e158a0SMatt Evans {
100*63e158a0SMatt Evans 	/*
101*63e158a0SMatt Evans 	 * FIXME: This assumes PPC64 and Linux guest.  It doesn't use the
102*63e158a0SMatt Evans 	 * OpenFirmware entry method, but instead the "embedded" entry which
103*63e158a0SMatt Evans 	 * passes the FDT address directly.
104*63e158a0SMatt Evans 	 */
105*63e158a0SMatt Evans 	struct kvm_regs *r = &vcpu->regs;
106*63e158a0SMatt Evans 
107*63e158a0SMatt Evans 	if (vcpu->cpu_id == 0) {
108*63e158a0SMatt Evans 		r->pc = KERNEL_START_ADDR;
109*63e158a0SMatt Evans 		r->gpr[3] = vcpu->kvm->fdt_gra;
110*63e158a0SMatt Evans 		r->gpr[5] = 0;
111*63e158a0SMatt Evans 	} else {
112*63e158a0SMatt Evans 		r->pc = KERNEL_SECONDARY_START_ADDR;
113*63e158a0SMatt Evans 		r->gpr[3] = vcpu->cpu_id;
114*63e158a0SMatt Evans 	}
115*63e158a0SMatt Evans 	r->msr = 0x8000000000001000UL; /* 64bit, non-HV, ME */
116*63e158a0SMatt Evans 
117*63e158a0SMatt Evans 	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
118*63e158a0SMatt Evans 		die_perror("KVM_SET_REGS failed");
119*63e158a0SMatt Evans }
120*63e158a0SMatt Evans 
/*
 * PPC64/SPAPR needs no sregs setup.  Non-paravirtualised platforms may
 * eventually need TLB/SLB initialisation here.
 */
static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
}
128*63e158a0SMatt Evans 
/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 *
 * Re-applies the boot-time register, sregs and FPU state in that order.
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
}
138*63e158a0SMatt Evans 
/* kvm_cpu__irq - set KVM's IRQ flag on this vcpu (not implemented on PPC) */
void kvm_cpu__irq(struct kvm_cpu *vcpu, int pin, int level)
{
}
143*63e158a0SMatt Evans 
/* NMI injection is not implemented for PPC. */
void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
}
147*63e158a0SMatt Evans 
148*63e158a0SMatt Evans bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
149*63e158a0SMatt Evans {
150*63e158a0SMatt Evans 	bool ret = true;
151*63e158a0SMatt Evans 	struct kvm_run *run = vcpu->kvm_run;
152*63e158a0SMatt Evans 	switch(run->exit_reason) {
153*63e158a0SMatt Evans 	default:
154*63e158a0SMatt Evans 		ret = false;
155*63e158a0SMatt Evans 	}
156*63e158a0SMatt Evans 	return ret;
157*63e158a0SMatt Evans }
158*63e158a0SMatt Evans 
159*63e158a0SMatt Evans #define CONDSTR_BIT(m, b) (((m) & MSR_##b) ? #b" " : "")
160*63e158a0SMatt Evans 
161*63e158a0SMatt Evans void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
162*63e158a0SMatt Evans {
163*63e158a0SMatt Evans 	struct kvm_regs regs;
164*63e158a0SMatt Evans 	struct kvm_sregs sregs;
165*63e158a0SMatt Evans 	int r;
166*63e158a0SMatt Evans 
167*63e158a0SMatt Evans 	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
168*63e158a0SMatt Evans 		die("KVM_GET_REGS failed");
169*63e158a0SMatt Evans         if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
170*63e158a0SMatt Evans 		die("KVM_GET_SREGS failed");
171*63e158a0SMatt Evans 
172*63e158a0SMatt Evans 	dprintf(debug_fd, "\n Registers:\n");
173*63e158a0SMatt Evans 	dprintf(debug_fd, " NIP:   %016llx  MSR:   %016llx "
174*63e158a0SMatt Evans 		"( %s%s%s%s%s%s%s%s%s%s%s%s)\n",
175*63e158a0SMatt Evans 		regs.pc, regs.msr,
176*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, SF),
177*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, HV), /* ! */
178*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, VEC),
179*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, VSX),
180*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, EE),
181*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, PR),
182*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, FP),
183*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, ME),
184*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, IR),
185*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, DR),
186*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, RI),
187*63e158a0SMatt Evans 		CONDSTR_BIT(regs.msr, LE));
188*63e158a0SMatt Evans 	dprintf(debug_fd, " CTR:   %016llx  LR:    %016llx  CR:   %08llx\n",
189*63e158a0SMatt Evans 		regs.ctr, regs.lr, regs.cr);
190*63e158a0SMatt Evans 	dprintf(debug_fd, " SRR0:  %016llx  SRR1:  %016llx  XER:  %016llx\n",
191*63e158a0SMatt Evans 		regs.srr0, regs.srr1, regs.xer);
192*63e158a0SMatt Evans 	dprintf(debug_fd, " SPRG0: %016llx  SPRG1: %016llx\n",
193*63e158a0SMatt Evans 		regs.sprg0, regs.sprg1);
194*63e158a0SMatt Evans 	dprintf(debug_fd, " SPRG2: %016llx  SPRG3: %016llx\n",
195*63e158a0SMatt Evans 		regs.sprg2, regs.sprg3);
196*63e158a0SMatt Evans 	dprintf(debug_fd, " SPRG4: %016llx  SPRG5: %016llx\n",
197*63e158a0SMatt Evans 		regs.sprg4, regs.sprg5);
198*63e158a0SMatt Evans 	dprintf(debug_fd, " SPRG6: %016llx  SPRG7: %016llx\n",
199*63e158a0SMatt Evans 		regs.sprg6, regs.sprg7);
200*63e158a0SMatt Evans 	dprintf(debug_fd, " GPRs:\n ");
201*63e158a0SMatt Evans 	for (r = 0; r < 32; r++) {
202*63e158a0SMatt Evans 		dprintf(debug_fd, "%016llx  ", regs.gpr[r]);
203*63e158a0SMatt Evans 		if ((r & 3) == 3)
204*63e158a0SMatt Evans 			dprintf(debug_fd, "\n ");
205*63e158a0SMatt Evans 	}
206*63e158a0SMatt Evans 	dprintf(debug_fd, "\n");
207*63e158a0SMatt Evans 
208*63e158a0SMatt Evans 	/* FIXME: Assumes SLB-based (book3s) guest */
209*63e158a0SMatt Evans 	for (r = 0; r < 32; r++) {
210*63e158a0SMatt Evans 		dprintf(debug_fd, " SLB%02d  %016llx %016llx\n", r,
211*63e158a0SMatt Evans 			sregs.u.s.ppc64.slb[r].slbe,
212*63e158a0SMatt Evans 			sregs.u.s.ppc64.slb[r].slbv);
213*63e158a0SMatt Evans 	}
214*63e158a0SMatt Evans 	dprintf(debug_fd, "----------\n");
215*63e158a0SMatt Evans }
216*63e158a0SMatt Evans 
217*63e158a0SMatt Evans void kvm_cpu__show_code(struct kvm_cpu *vcpu)
218*63e158a0SMatt Evans {
219*63e158a0SMatt Evans 	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
220*63e158a0SMatt Evans 		die("KVM_GET_REGS failed");
221*63e158a0SMatt Evans 
222*63e158a0SMatt Evans 	/* FIXME: Dump/disassemble some code...! */
223*63e158a0SMatt Evans 
224*63e158a0SMatt Evans 	dprintf(debug_fd, "\n Stack:\n");
225*63e158a0SMatt Evans 	dprintf(debug_fd,   " ------\n");
226*63e158a0SMatt Evans 	/* Only works in real mode: */
227*63e158a0SMatt Evans 	kvm__dump_mem(vcpu->kvm, vcpu->regs.gpr[1], 32);
228*63e158a0SMatt Evans }
229*63e158a0SMatt Evans 
/* Page-table dumping is not implemented for PPC yet. */
void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
}
234