/* kvmtool: x86/kvm-cpu.c (xref revision df4239fb27a9670b554d71fb4a79bacfb804d477) */
#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <asm/msr-index.h>
#include <asm/apicdef.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

static int debug_fd;

void kvm_cpu__set_debug_fd(int fd)
{
	debug_fd = fd;
}

int kvm_cpu__get_debug_fd(void)
{
	return debug_fd;
}

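/*
 * CR0 bit 0 is the Protection Enable (PE) flag; while it is clear the vcpu
 * is still running in real mode.
 */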
static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
{
	return vcpu->sregs.cr0 & 0x01;
}

static inline u64 ip_to_flat(struct kvm_cpu *vcpu, u64 ip)
{
	u64 cs;

	/*
	 * NOTE! We should take the code segment base address into account here.
	 * Luckily it's usually zero because Linux uses a flat memory model.
	 */
	if (is_in_protected_mode(vcpu))
		return ip;

	cs = vcpu->sregs.cs.selector;

	return ip + (cs << 4);
}

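/*
 * Both ip_to_flat() above and selector_to_base() below apply the real-mode
 * rule that a segment contributes 'selector * 16' to the flat address.
 * Illustrative example (values made up, not taken from the code): with
 * CS = 0x1000 and IP = 0x0200 the flat address is
 * (0x1000 << 4) + 0x0200 = 0x10200.
 */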
static inline u32 selector_to_base(u16 selector)
{
	/*
	 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
	 */
	return (u32)selector << 4;
}

static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
	struct kvm_cpu *vcpu;

	vcpu = calloc(1, sizeof(*vcpu));
	if (!vcpu)
		return NULL;

	vcpu->kvm = kvm;

	return vcpu;
}

void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
	if (vcpu->msrs)
		free(vcpu->msrs);

	free(vcpu);
}

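/*
 * Wire up the local APIC's external interrupt pins: LINT0 delivers ExtINT
 * (legacy i8259 PIC interrupts) and LINT1 delivers NMI, the conventional
 * PC wiring.
 */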
static int kvm_cpu__set_lint(struct kvm_cpu *vcpu)
{
	struct local_apic lapic;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_LAPIC, &lapic))
		return -1;

	lapic.lvt_lint0.delivery_mode = APIC_MODE_EXTINT;
	lapic.lvt_lint1.delivery_mode = APIC_MODE_NMI;

	return ioctl(vcpu->vcpu_fd, KVM_SET_LAPIC, &lapic);
}

struct kvm_cpu *kvm_cpu__arch_init(struct kvm *kvm, unsigned long cpu_id)
{
	struct kvm_cpu *vcpu;
	int mmap_size;
	int coalesced_offset;

	vcpu = kvm_cpu__new(kvm);
	if (!vcpu)
		return NULL;

	vcpu->cpu_id = cpu_id;

	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
	if (vcpu->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
	if (vcpu->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

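	/*
	 * For KVM_CAP_COALESCED_MMIO, KVM_CHECK_EXTENSION returns the offset
	 * (in pages) of the coalesced MMIO ring inside the vcpu mmap area,
	 * or 0 if the capability is absent.
	 */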
	coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
	if (coalesced_offset)
		vcpu->ring = (void *)vcpu->kvm_run + (coalesced_offset * PAGE_SIZE);

	if (kvm_cpu__set_lint(vcpu))
		die_perror("kvm_cpu__set_lint() failed");

	vcpu->is_running = true;

	return vcpu;
}

static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	struct kvm_msrs *msrs = calloc(1, sizeof(*msrs) + (sizeof(struct kvm_msr_entry) * nmsrs));

	if (!msrs)
		die("out of memory");

	return msrs;
}

#define KVM_MSR_ENTRY(_index, _data)	\
	(struct kvm_msr_entry) { .index = _index, .data = _data }

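/*
 * Start the guest with a deterministic MSR state: the SYSENTER/SYSCALL
 * MSRs and the TSC are zeroed, and fast string operations are enabled in
 * MSR_IA32_MISC_ENABLE.
 */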
static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
{
	unsigned long ndx = 0;

	vcpu->msrs = kvm_msrs__new(100);

	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,	0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,	0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,	0x0);
#ifdef CONFIG_X86_64
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR,			0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR,			0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE,		0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK,		0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR,			0x0);
#endif
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC,		0x0);
	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_MISC_ENABLE,
						MSR_IA32_MISC_ENABLE_FAST_STRING);

	vcpu->msrs->nmsrs = ndx;

	if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
		die_perror("KVM_SET_MSRS failed");
}

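/*
 * 0x37f and 0x1f80 are the architectural power-on values of the x87
 * control word and MXCSR respectively.
 */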
static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
	vcpu->fpu = (struct kvm_fpu) {
		.fcw	= 0x37f,
		.mxcsr	= 0x1f80,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
		die_perror("KVM_SET_FPU failed");
}

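/*
 * RFLAGS bit 1 is a reserved bit that always reads as one, so 0x2 is the
 * canonical reset value used below.
 */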
static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
	vcpu->regs = (struct kvm_regs) {
		/* We start the guest in 16-bit real mode */
		.rflags	= 0x0000000000000002ULL,

		.rip	= vcpu->kvm->arch.boot_ip,
		.rsp	= vcpu->kvm->arch.boot_sp,
		.rbp	= vcpu->kvm->arch.boot_sp,
	};

	if (vcpu->regs.rip > USHRT_MAX)
		die("ip 0x%llx is too high for real mode", (u64)vcpu->regs.rip);

	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
		die_perror("KVM_SET_REGS failed");
}

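/*
 * Point every segment register at the boot selector so that real-mode
 * segment:offset addresses resolve to the flat addresses where the boot
 * code was loaded.
 */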
static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
		die_perror("KVM_GET_SREGS failed");

	vcpu->sregs.cs.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.cs.base	= selector_to_base(vcpu->kvm->arch.boot_selector);
	vcpu->sregs.ss.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.ss.base	= selector_to_base(vcpu->kvm->arch.boot_selector);
	vcpu->sregs.ds.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.ds.base	= selector_to_base(vcpu->kvm->arch.boot_selector);
	vcpu->sregs.es.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.es.base	= selector_to_base(vcpu->kvm->arch.boot_selector);
	vcpu->sregs.fs.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.fs.base	= selector_to_base(vcpu->kvm->arch.boot_selector);
	vcpu->sregs.gs.selector	= vcpu->kvm->arch.boot_selector;
	vcpu->sregs.gs.base	= selector_to_base(vcpu->kvm->arch.boot_selector);

	if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &vcpu->sregs) < 0)
		die_perror("KVM_SET_SREGS failed");
}

/**
 * kvm_cpu__reset_vcpu - reset virtual CPU to a known state
 */
void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
	kvm_cpu__setup_cpuid(vcpu);
	kvm_cpu__setup_sregs(vcpu);
	kvm_cpu__setup_regs(vcpu);
	kvm_cpu__setup_fpu(vcpu);
	kvm_cpu__setup_msrs(vcpu);
}

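/*
 * No x86-specific KVM exit reasons are handled here; returning false defers
 * to the generic exit handling.
 */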
bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
{
	return false;
}

static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	dprintf(debug_fd, " %s                 %016llx  %08hx\n",
		name, (u64) dtable->base, (u16) dtable->limit);
}

static void print_segment(const char *name, struct kvm_segment *seg)
{
	dprintf(debug_fd, " %s       %04hx      %016llx  %08x  %02hhx    %x %x   %x  %x %x %x %x\n",
		name, (u16) seg->selector, (u64) seg->base, (u32) seg->limit,
		(u8) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}

void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
	unsigned long cr0, cr2, cr3;
	unsigned long cr4, cr8;
	unsigned long rax, rbx, rcx;
	unsigned long rdx, rsi, rdi;
	unsigned long rbp,  r8,  r9;
	unsigned long r10, r11, r12;
	unsigned long r13, r14, r15;
	unsigned long rip, rsp;
	struct kvm_sregs sregs;
	unsigned long rflags;
	struct kvm_regs regs;
	int i;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
		die("KVM_GET_REGS failed");

	rflags = regs.rflags;

	rip = regs.rip; rsp = regs.rsp;
	rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
	rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
	rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
	r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
	r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;

	dprintf(debug_fd, "\n Registers:\n");
	dprintf(debug_fd,   " ----------\n");
	dprintf(debug_fd, " rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
	dprintf(debug_fd, " rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
	dprintf(debug_fd, " rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
	dprintf(debug_fd, " rbp: %016lx    r8: %016lx    r9: %016lx\n", rbp, r8,  r9);
	dprintf(debug_fd, " r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
	dprintf(debug_fd, " r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		die("KVM_GET_SREGS failed");

	cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
	cr4 = sregs.cr4; cr8 = sregs.cr8;

	dprintf(debug_fd, " cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
	dprintf(debug_fd, " cr4: %016lx   cr8: %016lx\n", cr4, cr8);
	dprintf(debug_fd, "\n Segment registers:\n");
	dprintf(debug_fd,   " ------------------\n");
	dprintf(debug_fd, " register  selector  base              limit     type  p dpl db s l g avl\n");
	print_segment("cs ", &sregs.cs);
	print_segment("ss ", &sregs.ss);
	print_segment("ds ", &sregs.ds);
	print_segment("es ", &sregs.es);
	print_segment("fs ", &sregs.fs);
	print_segment("gs ", &sregs.gs);
	print_segment("tr ", &sregs.tr);
	print_segment("ldt", &sregs.ldt);
	print_dtable("gdt", &sregs.gdt);
	print_dtable("idt", &sregs.idt);

	dprintf(debug_fd, "\n APIC:\n");
	dprintf(debug_fd,   " -----\n");
	dprintf(debug_fd, " efer: %016llx  apic base: %016llx  nmi: %s\n",
		(u64) sregs.efer, (u64) sregs.apic_base,
		(vcpu->kvm->nmi_disabled ? "disabled" : "enabled"));

	dprintf(debug_fd, "\n Interrupt bitmap:\n");
	dprintf(debug_fd,   " -----------------\n");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
		dprintf(debug_fd, " %016llx", (u64) sregs.interrupt_bitmap[i]);
	dprintf(debug_fd, "\n");
}

#define MAX_SYM_LEN 128

void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
	unsigned int code_bytes = 64;
	unsigned int code_prologue = 43;
	unsigned int code_len = code_bytes;
	char sym[MAX_SYM_LEN] = SYMBOL_DEFAULT_UNKNOWN, *psym;
	unsigned char c;
	unsigned int i;
	u8 *ip;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
		die("KVM_GET_REGS failed");

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
		die("KVM_GET_SREGS failed");

	ip = guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip) - code_prologue);

	dprintf(debug_fd, "\n Code:\n");
	dprintf(debug_fd,   " -----\n");

	psym = symbol_lookup(vcpu->kvm, vcpu->regs.rip, sym, MAX_SYM_LEN);
	if (IS_ERR(psym))
		dprintf(debug_fd,
			"Warning: symbol_lookup() failed to find symbol "
			"with error: %ld\n", PTR_ERR(psym));

	dprintf(debug_fd, " rip: [<%016lx>] %s\n\n", (unsigned long) vcpu->regs.rip, sym);

	for (i = 0; i < code_len; i++, ip++) {
		if (!host_ptr_in_ram(vcpu->kvm, ip))
			break;

		c = *ip;

		if (ip == guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip)))
			dprintf(debug_fd, " <%02x>", c);
		else
			dprintf(debug_fd, " %02x", c);
	}

	dprintf(debug_fd, "\n");

	dprintf(debug_fd, "\n Stack:\n");
	dprintf(debug_fd,   " ------\n");
	kvm__dump_mem(vcpu->kvm, vcpu->regs.rsp, 32);
}

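/*
 * Walk the four paging levels reachable from CR3. A set PS bit (bit 7) in
 * the level-2 entry means a large page, so there is no level-1 table to
 * print in that case.
 */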
void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
	u64 *pte1;
	u64 *pte2;
	u64 *pte3;
	u64 *pte4;

	if (!is_in_protected_mode(vcpu))
		return;

	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
		die("KVM_GET_SREGS failed");

	pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
	if (!host_ptr_in_ram(vcpu->kvm, pte4))
		return;

	pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
	if (!host_ptr_in_ram(vcpu->kvm, pte3))
		return;

	pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
	if (!host_ptr_in_ram(vcpu->kvm, pte2))
		return;

	pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
	if (!host_ptr_in_ram(vcpu->kvm, pte1))
		return;

	dprintf(debug_fd, "Page Tables:\n");
	if (*pte2 & (1 << 7))
		dprintf(debug_fd, " pte4: %016llx   pte3: %016llx"
			"   pte2: %016llx\n",
			*pte4, *pte3, *pte2);
	else
		dprintf(debug_fd, " pte4: %016llx  pte3: %016llx   pte2: %016"
			"llx   pte1: %016llx\n",
			*pte4, *pte3, *pte2, *pte1);
}

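/*
 * Inject an NMI only if the guest has left LINT1 unmasked and programmed
 * for NMI delivery; otherwise the request is silently dropped.
 */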
void kvm_cpu__arch_nmi(struct kvm_cpu *cpu)
{
	struct kvm_lapic_state klapic;
	struct local_apic *lapic = (void *)&klapic;

	if (ioctl(cpu->vcpu_fd, KVM_GET_LAPIC, &klapic) != 0)
		return;

	if (lapic->lvt_lint1.mask)
		return;

	if (lapic->lvt_lint1.delivery_mode != APIC_MODE_NMI)
		return;

	ioctl(cpu->vcpu_fd, KVM_NMI);
}