xref: /kvmtool/kvm.c (revision 9667701c38a1f3c6a962e3aa8ec77ff8aafe2864)
1ae1fae34SPekka Enberg #include "kvm/kvm.h"
2ae1fae34SPekka Enberg 
3b3594ec7SCyrill Gorcunov #include "kvm/boot-protocol.h"
472811558SPekka Enberg #include "kvm/cpufeature.h"
572811558SPekka Enberg #include "kvm/read-write.h"
672811558SPekka Enberg #include "kvm/interrupt.h"
70c7c14a7SCyrill Gorcunov #include "kvm/mptable.h"
872811558SPekka Enberg #include "kvm/util.h"
94298ddadSSasha Levin #include "kvm/mutex.h"
104298ddadSSasha Levin #include "kvm/kvm-cpu.h"
11eda03319SPekka Enberg 
126c7d8514SPekka Enberg #include <linux/kvm.h>
13f5ab5f67SPekka Enberg 
14f5ab5f67SPekka Enberg #include <asm/bootparam.h>
15f5ab5f67SPekka Enberg 
16ae1fae34SPekka Enberg #include <sys/ioctl.h>
171f9cff23SPekka Enberg #include <sys/mman.h>
18ce79f1caSPekka Enberg #include <sys/stat.h>
192da26a59SPekka Enberg #include <stdbool.h>
206e5e8b8dSPekka Enberg #include <assert.h>
2106e41eeaSPekka Enberg #include <limits.h>
22ce79f1caSPekka Enberg #include <signal.h>
23f5ab5f67SPekka Enberg #include <stdarg.h>
24b8f6afcdSPekka Enberg #include <stdlib.h>
25f5ab5f67SPekka Enberg #include <string.h>
260d1f17ecSPekka Enberg #include <unistd.h>
271f9cff23SPekka Enberg #include <stdio.h>
28b8f6afcdSPekka Enberg #include <fcntl.h>
29ce79f1caSPekka Enberg #include <time.h>
304298ddadSSasha Levin #include <sys/eventfd.h>
31c7828731SSasha Levin #include <asm/unistd.h>
3263bc8503SSasha Levin #include <dirent.h>
33b8f6afcdSPekka Enberg 
34ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
350d1f17ecSPekka Enberg 
36ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = {
37ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
38ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
39ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
40ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
41ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
42ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
43ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
44ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
45ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
46ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
47ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
48ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
49ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
50ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
51ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
52ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
53ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
54ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
559b1fb1c3SPekka Enberg };
569b1fb1c3SPekka Enberg 
5755e19624SCyrill Gorcunov #define DEFINE_KVM_EXT(ext)		\
5855e19624SCyrill Gorcunov 	.name = #ext,			\
5955e19624SCyrill Gorcunov 	.code = ext
6055e19624SCyrill Gorcunov 
6155e19624SCyrill Gorcunov struct {
6255e19624SCyrill Gorcunov 	const char *name;
6355e19624SCyrill Gorcunov 	int code;
6455e19624SCyrill Gorcunov } kvm_req_ext[] = {
6555e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
6655e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
6755e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
6855e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
6955e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
7055e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
717c0ec28fSCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
7255e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
73d38ad31aSCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
7455e19624SCyrill Gorcunov };
7555e19624SCyrill Gorcunov 
764298ddadSSasha Levin extern struct kvm *kvm;
774298ddadSSasha Levin extern struct kvm_cpu *kvm_cpus[KVM_NR_CPUS];
784298ddadSSasha Levin static int pause_event;
794298ddadSSasha Levin static DEFINE_MUTEX(pause_lock);
804298ddadSSasha Levin 
81*9667701cSPekka Enberg static char kvm_dir[PATH_MAX];
82*9667701cSPekka Enberg 
83*9667701cSPekka Enberg static void set_dir(const char *fmt, va_list args)
84*9667701cSPekka Enberg {
85*9667701cSPekka Enberg 	vsnprintf(kvm_dir, sizeof(kvm_dir), fmt, args);
86*9667701cSPekka Enberg }
87*9667701cSPekka Enberg 
88*9667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...)
89*9667701cSPekka Enberg {
90*9667701cSPekka Enberg 	va_list args;
91*9667701cSPekka Enberg 
92*9667701cSPekka Enberg 	va_start(args, fmt);
93*9667701cSPekka Enberg 	set_dir(fmt, args);
94*9667701cSPekka Enberg 	va_end(args);
95*9667701cSPekka Enberg }
96*9667701cSPekka Enberg 
97*9667701cSPekka Enberg const char *kvm__get_dir(void)
98*9667701cSPekka Enberg {
99*9667701cSPekka Enberg 	return kvm_dir;
100*9667701cSPekka Enberg }
101*9667701cSPekka Enberg 
10243835ac9SSasha Levin static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
103b8f6afcdSPekka Enberg {
10428fa19c0SPekka Enberg 	int ret;
105b8f6afcdSPekka Enberg 
10643835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
1074076b041SPekka Enberg 	if (ret < 0)
1084076b041SPekka Enberg 		return false;
1094076b041SPekka Enberg 
1104076b041SPekka Enberg 	return ret;
1114076b041SPekka Enberg }
1124076b041SPekka Enberg 
11343835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm)
11455e19624SCyrill Gorcunov {
11555e19624SCyrill Gorcunov 	unsigned int i;
11655e19624SCyrill Gorcunov 
11755e19624SCyrill Gorcunov 	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
11843835ac9SSasha Levin 		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
1194542f276SCyrill Gorcunov 			pr_error("Unsupported KVM extension detected: %s",
12055e19624SCyrill Gorcunov 				kvm_req_ext[i].name);
12155e19624SCyrill Gorcunov 			return -1;
12255e19624SCyrill Gorcunov 		}
12355e19624SCyrill Gorcunov 	}
12455e19624SCyrill Gorcunov 
12555e19624SCyrill Gorcunov 	return 0;
12655e19624SCyrill Gorcunov }
12755e19624SCyrill Gorcunov 
1284076b041SPekka Enberg static struct kvm *kvm__new(void)
1294076b041SPekka Enberg {
13043835ac9SSasha Levin 	struct kvm *kvm = calloc(1, sizeof *kvm);
1314076b041SPekka Enberg 
13243835ac9SSasha Levin 	if (!kvm)
1334076b041SPekka Enberg 		die("out of memory");
1344076b041SPekka Enberg 
13543835ac9SSasha Levin 	return kvm;
1364076b041SPekka Enberg }
1374076b041SPekka Enberg 
1385358b0e6SSasha Levin static void kvm__create_pidfile(struct kvm *kvm)
1395358b0e6SSasha Levin {
1405358b0e6SSasha Levin 	int fd;
1415358b0e6SSasha Levin 	char full_name[PATH_MAX], pid[10];
1425358b0e6SSasha Levin 
1435358b0e6SSasha Levin 	if (!kvm->name)
1445358b0e6SSasha Levin 		return;
1455358b0e6SSasha Levin 
146*9667701cSPekka Enberg 	sprintf(full_name, "%s", kvm__get_dir());
147c71efd96SSasha Levin 	mkdir(full_name, 0777);
148*9667701cSPekka Enberg 	sprintf(full_name, "%s/%s.pid", kvm__get_dir(), kvm->name);
1495358b0e6SSasha Levin 	fd = open(full_name, O_CREAT | O_WRONLY, 0666);
1505358b0e6SSasha Levin 	sprintf(pid, "%u\n", getpid());
1515358b0e6SSasha Levin 	if (write(fd, pid, strlen(pid)) <= 0)
1525358b0e6SSasha Levin 		die("Failed creating PID file");
1535358b0e6SSasha Levin 	close(fd);
1545358b0e6SSasha Levin }
1555358b0e6SSasha Levin 
156dc4cd384SSasha Levin void kvm__remove_pidfile(const char *name)
1575358b0e6SSasha Levin {
1585358b0e6SSasha Levin 	char full_name[PATH_MAX];
1595358b0e6SSasha Levin 
160*9667701cSPekka Enberg 	sprintf(full_name, "%s/%s.pid", kvm__get_dir(), name);
1615358b0e6SSasha Levin 	unlink(full_name);
1625358b0e6SSasha Levin }
1635358b0e6SSasha Levin 
16466ce4f5eSSasha Levin pid_t kvm__get_pid_by_instance(const char *name)
1655358b0e6SSasha Levin {
16666ce4f5eSSasha Levin 	int fd;
16766ce4f5eSSasha Levin 	pid_t pid;
1685358b0e6SSasha Levin 	char pid_str[10], pid_file[PATH_MAX];
1695358b0e6SSasha Levin 
170*9667701cSPekka Enberg 	sprintf(pid_file, "%s/%s.pid", kvm__get_dir(), name);
1715358b0e6SSasha Levin 	fd = open(pid_file, O_RDONLY);
1725358b0e6SSasha Levin 	if (fd < 0)
1735358b0e6SSasha Levin 		return -1;
1745358b0e6SSasha Levin 
1755358b0e6SSasha Levin 	if (read(fd, pid_str, 10) == 0)
1765358b0e6SSasha Levin 		return -1;
1775358b0e6SSasha Levin 
1785358b0e6SSasha Levin 	pid = atoi(pid_str);
1795358b0e6SSasha Levin 	if (pid < 0)
1805358b0e6SSasha Levin 		return -1;
1815358b0e6SSasha Levin 
1821a0ef251SSasha Levin 	close(fd);
1831a0ef251SSasha Levin 
1845358b0e6SSasha Levin 	return pid;
1855358b0e6SSasha Levin }
1865358b0e6SSasha Levin 
187886af5f2SLiming Wang int kvm__enumerate_instances(int (*callback)(const char *name, int pid))
18863bc8503SSasha Levin {
18963bc8503SSasha Levin 	char full_name[PATH_MAX];
19063bc8503SSasha Levin 	int pid;
19163bc8503SSasha Levin 	DIR *dir;
19263bc8503SSasha Levin 	struct dirent entry, *result;
193886af5f2SLiming Wang 	int ret = 0;
19463bc8503SSasha Levin 
195*9667701cSPekka Enberg 	sprintf(full_name, "%s", kvm__get_dir());
19663bc8503SSasha Levin 	dir = opendir(full_name);
19763bc8503SSasha Levin 
198f2e556f3SKonstantin Khlebnikov 	while (dir != NULL) {
19963bc8503SSasha Levin 		readdir_r(dir, &entry, &result);
20063bc8503SSasha Levin 		if (result == NULL)
20163bc8503SSasha Levin 			break;
20263bc8503SSasha Levin 		if (entry.d_type == DT_REG) {
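			/* Strip the ".pid" suffix to recover the instance name */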
20363bc8503SSasha Levin 			entry.d_name[strlen(entry.d_name)-4] = 0;
20463bc8503SSasha Levin 			pid = kvm__get_pid_by_instance(entry.d_name);
205886af5f2SLiming Wang 			ret = callback(entry.d_name, pid);
206886af5f2SLiming Wang 			if (ret < 0)
207886af5f2SLiming Wang 				break;
20863bc8503SSasha Levin 		}
20963bc8503SSasha Levin 	}
21063bc8503SSasha Levin 
2111a0ef251SSasha Levin 	closedir(dir);
2121a0ef251SSasha Levin 
213886af5f2SLiming Wang 	return ret;
21463bc8503SSasha Levin }
21563bc8503SSasha Levin 
21643835ac9SSasha Levin void kvm__delete(struct kvm *kvm)
2179ef4c68eSPekka Enberg {
21843835ac9SSasha Levin 	kvm__stop_timer(kvm);
219fbfe68b7SSasha Levin 
22043835ac9SSasha Levin 	munmap(kvm->ram_start, kvm->ram_size);
221dc4cd384SSasha Levin 	kvm__remove_pidfile(kvm->name);
22243835ac9SSasha Levin 	free(kvm);
2239ef4c68eSPekka Enberg }
2249ef4c68eSPekka Enberg 
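/*
 * Probe CPUID for hardware virtualization support: identify the vendor from
 * leaf 0, then test the VMX (Intel) or SVM (AMD) feature bit in ECX of leaf
 * eax_base + 1, i.e. leaf 0x1 on Intel and leaf 0x80000001 on AMD.
 */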
225c78b8713SAsias He static bool kvm__cpu_supports_vm(void)
226c78b8713SAsias He {
227c78b8713SAsias He 	struct cpuid_regs regs;
2283fdf659dSSasha Levin 	u32 eax_base;
229831fbf23SPekka Enberg 	int feature;
230c78b8713SAsias He 
231c78b8713SAsias He 	regs	= (struct cpuid_regs) {
232831fbf23SPekka Enberg 		.eax		= 0x00,
233c78b8713SAsias He 	};
234c78b8713SAsias He 	host_cpuid(&regs);
235c78b8713SAsias He 
236ae87afbfSCyrill Gorcunov 	switch (regs.ebx) {
237ae87afbfSCyrill Gorcunov 	case CPUID_VENDOR_INTEL_1:
238831fbf23SPekka Enberg 		eax_base	= 0x00;
239831fbf23SPekka Enberg 		feature		= KVM__X86_FEATURE_VMX;
240ae87afbfSCyrill Gorcunov 		break;
24134649df9SPekka Enberg 
242ae87afbfSCyrill Gorcunov 	case CPUID_VENDOR_AMD_1:
243831fbf23SPekka Enberg 		eax_base	= 0x80000000;
244831fbf23SPekka Enberg 		feature		= KVM__X86_FEATURE_SVM;
245ae87afbfSCyrill Gorcunov 		break;
24634649df9SPekka Enberg 
24734649df9SPekka Enberg 	default:
24834649df9SPekka Enberg 		return false;
249ae87afbfSCyrill Gorcunov 	}
250ae87afbfSCyrill Gorcunov 
251831fbf23SPekka Enberg 	regs	= (struct cpuid_regs) {
252831fbf23SPekka Enberg 		.eax		= eax_base,
253831fbf23SPekka Enberg 	};
254831fbf23SPekka Enberg 	host_cpuid(&regs);
255831fbf23SPekka Enberg 
256831fbf23SPekka Enberg 	if (regs.eax < eax_base + 0x01)
257831fbf23SPekka Enberg 		return false;
258831fbf23SPekka Enberg 
259831fbf23SPekka Enberg 	regs	= (struct cpuid_regs) {
260831fbf23SPekka Enberg 		.eax		= eax_base + 0x01
261831fbf23SPekka Enberg 	};
262831fbf23SPekka Enberg 	host_cpuid(&regs);
263831fbf23SPekka Enberg 
264831fbf23SPekka Enberg 	return regs.ecx & (1 << feature);
265c78b8713SAsias He }
266c78b8713SAsias He 
26796feb589SPekka Enberg /*
26896feb589SPekka Enberg  * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
26996feb589SPekka Enberg  * memory regions to it. Therefore, be careful if you use this function for
27096feb589SPekka Enberg  * registering memory regions for emulating hardware.
27196feb589SPekka Enberg  */
27296feb589SPekka Enberg void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
2734076b041SPekka Enberg {
2742b0e3342SPekka Enberg 	struct kvm_userspace_memory_region mem;
275839051d9SSasha Levin 	int ret;
276839051d9SSasha Levin 
277839051d9SSasha Levin 	mem = (struct kvm_userspace_memory_region) {
27896feb589SPekka Enberg 		.slot			= kvm->mem_slots++,
279874467f8SSasha Levin 		.guest_phys_addr	= guest_phys,
280874467f8SSasha Levin 		.memory_size		= size,
281c4acb611SIngo Molnar 		.userspace_addr		= (unsigned long)userspace_addr,
282839051d9SSasha Levin 	};
283839051d9SSasha Levin 
284874467f8SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
285839051d9SSasha Levin 	if (ret < 0)
286839051d9SSasha Levin 		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
287839051d9SSasha Levin }
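
/*
 * Illustrative sketch (not part of this file): a device model that needs a
 * private, non-overlapping guest-physical window could back it with anonymous
 * host memory and hand it to kvm__register_mem(). The 0xd0000000 base and 1MB
 * size below are made-up placeholder values, not addresses kvmtool uses.
 *
 *	void *backing = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (backing == MAP_FAILED)
 *		die_perror("mmap");
 *	kvm__register_mem(kvm, 0xd0000000ULL, 1 << 20, backing);
 */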
288839051d9SSasha Levin 
289874467f8SSasha Levin /*
290874467f8SSasha Levin  * Allocating more than 4GB of RAM requires us to leave a gap
291874467f8SSasha Levin  * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
292874467f8SSasha Levin  * devices (see the documentation of e820_setup_gap() for details).
293874467f8SSasha Levin  *
294874467f8SSasha Levin  * If we're required to initialize RAM bigger than 4GB, we will create
295874467f8SSasha Levin  * a gap between 0xe0000000 and 0x100000000 in the guest physical address space.
296874467f8SSasha Levin  */
297874467f8SSasha Levin 
29843835ac9SSasha Levin void kvm__init_ram(struct kvm *kvm)
299874467f8SSasha Levin {
300874467f8SSasha Levin 	u64	phys_start, phys_size;
301874467f8SSasha Levin 	void	*host_mem;
302874467f8SSasha Levin 
30343835ac9SSasha Levin 	if (kvm->ram_size < KVM_32BIT_GAP_START) {
304874467f8SSasha Levin 		/* Use a single block of RAM when it all fits below the 32-bit PCI gap */
305874467f8SSasha Levin 
306874467f8SSasha Levin 		phys_start = 0;
30743835ac9SSasha Levin 		phys_size  = kvm->ram_size;
30843835ac9SSasha Levin 		host_mem   = kvm->ram_start;
309874467f8SSasha Levin 
31096feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
311874467f8SSasha Levin 	} else {
312874467f8SSasha Levin 		/* First RAM range from zero to the PCI gap: */
313874467f8SSasha Levin 
314874467f8SSasha Levin 		phys_start = 0;
315874467f8SSasha Levin 		phys_size  = KVM_32BIT_GAP_START;
31643835ac9SSasha Levin 		host_mem   = kvm->ram_start;
317874467f8SSasha Levin 
31896feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
319874467f8SSasha Levin 
320874467f8SSasha Levin 		/* Second RAM range from 4GB to the end of RAM: */
321874467f8SSasha Levin 
322874467f8SSasha Levin 		phys_start = 0x100000000ULL;
32343835ac9SSasha Levin 		phys_size  = kvm->ram_size - phys_size;
32443835ac9SSasha Levin 		host_mem   = kvm->ram_start + phys_start;
325874467f8SSasha Levin 
32696feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
327874467f8SSasha Levin 	}
328874467f8SSasha Levin }
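
/*
 * Worked example (illustrative): with ram_size = 6GB (0x180000000), the code
 * above registers two slots:
 *
 *	slot 0: guest_phys 0x000000000, size 0x0e0000000  (RAM below the PCI gap)
 *	slot 1: guest_phys 0x100000000, size 0x0a0000000  (the rest, above 4GB)
 *
 * i.e. the second slot's size is ram_size - KVM_32BIT_GAP_START, and the gap
 * [0xe0000000, 0x100000000) stays unbacked (it is mprotect'ed PROT_NONE in
 * kvm__init()).
 */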
329874467f8SSasha Levin 
3308259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm)
331384922b3SPekka Enberg {
332384922b3SPekka Enberg 	int ret;
333384922b3SPekka Enberg 
33443835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
3358259b8ccSSasha Levin 	if (ret <= 0)
336384922b3SPekka Enberg 		die_perror("KVM_CAP_NR_VCPUS");
337384922b3SPekka Enberg 
338384922b3SPekka Enberg 	return ret;
339384922b3SPekka Enberg }
340384922b3SPekka Enberg 
3418259b8ccSSasha Levin /*
3428259b8ccSSasha Levin  * The following hack should be removed once 'x86: Raise the hard
3438259b8ccSSasha Levin  * VCPU count limit' makes its way into the mainline.
3448259b8ccSSasha Levin  */
3458259b8ccSSasha Levin #ifndef KVM_CAP_MAX_VCPUS
3468259b8ccSSasha Levin #define KVM_CAP_MAX_VCPUS 66
3478259b8ccSSasha Levin #endif
3488259b8ccSSasha Levin 
3498259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm)
3508259b8ccSSasha Levin {
3518259b8ccSSasha Levin 	int ret;
3528259b8ccSSasha Levin 
3538259b8ccSSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
3548259b8ccSSasha Levin 	if (ret <= 0)
3558259b8ccSSasha Levin 		ret = kvm__recommended_cpus(kvm);
3568259b8ccSSasha Levin 
3578259b8ccSSasha Levin 	return ret;
3588259b8ccSSasha Levin }
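
/*
 * Illustrative usage sketch (the real call sites live outside this file, and
 * "nrcpus" below is just a hypothetical caller variable): a launcher would
 * typically bound the requested CPU count by both helpers, e.g.
 *
 *	int recommended = kvm__recommended_cpus(kvm);
 *	int max = kvm__max_cpus(kvm);
 *
 *	if (nrcpus > max)
 *		die("%d CPUs requested, but KVM supports at most %d", nrcpus, max);
 *	else if (nrcpus > recommended)
 *		pr_warning("%d CPUs requested, above the recommended %d", nrcpus, recommended);
 */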
3598259b8ccSSasha Levin 
3605358b0e6SSasha Levin struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
361839051d9SSasha Levin {
3629687927dSAsias He 	struct kvm_pit_config pit_config = { .flags = 0, };
36343835ac9SSasha Levin 	struct kvm *kvm;
3644076b041SPekka Enberg 	int ret;
3654076b041SPekka Enberg 
366c78b8713SAsias He 	if (!kvm__cpu_supports_vm())
367c78b8713SAsias He 		die("Your CPU does not support hardware virtualization");
368c78b8713SAsias He 
36943835ac9SSasha Levin 	kvm = kvm__new();
3704076b041SPekka Enberg 
37143835ac9SSasha Levin 	kvm->sys_fd = open(kvm_dev, O_RDWR);
37243835ac9SSasha Levin 	if (kvm->sys_fd < 0) {
3736d7c36ceSPekka Enberg 		if (errno == ENOENT)
374e907b83fSPekka Enberg 			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
375f8334800SIngo Molnar 		if (errno == ENODEV)
376f8334800SIngo Molnar 			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);
3776d7c36ceSPekka Enberg 
378f8334800SIngo Molnar 		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
379f8334800SIngo Molnar 		perror(NULL);
380f8334800SIngo Molnar 		exit(1);
3816d7c36ceSPekka Enberg 	}
382b8f6afcdSPekka Enberg 
38343835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
3846c7d8514SPekka Enberg 	if (ret != KVM_API_VERSION)
385f5ab5f67SPekka Enberg 		die_perror("KVM_API_VERSION ioctl");
3866c7d8514SPekka Enberg 
38743835ac9SSasha Levin 	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
38843835ac9SSasha Levin 	if (kvm->vm_fd < 0)
389f5ab5f67SPekka Enberg 		die_perror("KVM_CREATE_VM ioctl");
39028fa19c0SPekka Enberg 
39143835ac9SSasha Levin 	if (kvm__check_extensions(kvm))
39255e19624SCyrill Gorcunov 		die("A required KVM extension is not supported by the OS");
3939687927dSAsias He 
39443835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
3959687927dSAsias He 	if (ret < 0)
3969687927dSAsias He 		die_perror("KVM_SET_TSS_ADDR ioctl");
3979687927dSAsias He 
39843835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
3999687927dSAsias He 	if (ret < 0)
4009687927dSAsias He 		die_perror("KVM_CREATE_PIT2 ioctl");
4019687927dSAsias He 
40243835ac9SSasha Levin 	kvm->ram_size		= ram_size;
4030d1f17ecSPekka Enberg 
40443835ac9SSasha Levin 	if (kvm->ram_size < KVM_32BIT_GAP_START) {
40537c34ca8SSasha Levin 		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
406874467f8SSasha Levin 	} else {
40737c34ca8SSasha Levin 		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
40843835ac9SSasha Levin 		if (kvm->ram_start != MAP_FAILED) {
409874467f8SSasha Levin 			/*
410874467f8SSasha Levin 			 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
411874467f8SSasha Levin 			 * if we accidentally write to it, we will know.
412874467f8SSasha Levin 			 */
41343835ac9SSasha Levin 			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
414874467f8SSasha Levin 		}
415874467f8SSasha Levin 	}
41643835ac9SSasha Levin 	if (kvm->ram_start == MAP_FAILED)
4170d1f17ecSPekka Enberg 		die("out of memory");
4180d1f17ecSPekka Enberg 
4197f4f39a4SSasha Levin 	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);
4207f4f39a4SSasha Levin 
42143835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
422895c2fefSPekka Enberg 	if (ret < 0)
4239687927dSAsias He 		die_perror("KVM_CREATE_IRQCHIP ioctl");
4249687927dSAsias He 
4255358b0e6SSasha Levin 	kvm->name = name;
4265358b0e6SSasha Levin 
4275358b0e6SSasha Levin 	kvm__create_pidfile(kvm);
4285358b0e6SSasha Levin 
42943835ac9SSasha Levin 	return kvm;
4304076b041SPekka Enberg }
4314076b041SPekka Enberg 
4325f6772b8SCyrill Gorcunov #define BOOT_LOADER_SELECTOR	0x1000
433b08e9ec4SPekka Enberg #define BOOT_LOADER_IP		0x0000
434dbdb74c2SPekka Enberg #define BOOT_LOADER_SP		0x8000
4352dd4a4edSCyrill Gorcunov #define BOOT_CMDLINE_OFFSET	0x20000
4362dd4a4edSCyrill Gorcunov 
4379a4ecdc5SPekka Enberg #define BOOT_PROTOCOL_REQUIRED	0x206
438a43f6460SCyrill Gorcunov #define LOAD_HIGH		0x01
439009b0758SPekka Enberg 
44043835ac9SSasha Levin static int load_flat_binary(struct kvm *kvm, int fd)
441009b0758SPekka Enberg {
442009b0758SPekka Enberg 	void *p;
443009b0758SPekka Enberg 	int nr;
444009b0758SPekka Enberg 
445009b0758SPekka Enberg 	if (lseek(fd, 0, SEEK_SET) < 0)
446009b0758SPekka Enberg 		die_perror("lseek");
447009b0758SPekka Enberg 
44843835ac9SSasha Levin 	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
449009b0758SPekka Enberg 
450009b0758SPekka Enberg 	while ((nr = read(fd, p, 65536)) > 0)
451009b0758SPekka Enberg 		p += nr;
452009b0758SPekka Enberg 
45343835ac9SSasha Levin 	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
45443835ac9SSasha Levin 	kvm->boot_ip		= BOOT_LOADER_IP;
45543835ac9SSasha Levin 	kvm->boot_sp		= BOOT_LOADER_SP;
456edc8a14dSPekka Enberg 
4577fb218bdSPekka Enberg 	return true;
458009b0758SPekka Enberg }
459009b0758SPekka Enberg 
460ae1fae34SPekka Enberg static const char *BZIMAGE_MAGIC	= "HdrS";
461ae1fae34SPekka Enberg 
46243835ac9SSasha Levin static bool load_bzimage(struct kvm *kvm, int fd_kernel,
46353861c74SJohn Floren 			int fd_initrd, const char *kernel_cmdline, u16 vidmode)
464ae1fae34SPekka Enberg {
465b9271160SPekka Enberg 	struct boot_params *kern_boot;
4664b62331fSPekka Enberg 	unsigned long setup_sects;
467b9271160SPekka Enberg 	struct boot_params boot;
4682dd4a4edSCyrill Gorcunov 	size_t cmdline_size;
4697fb218bdSPekka Enberg 	ssize_t setup_size;
47022489bb0SCyrill Gorcunov 	void *p;
471ae1fae34SPekka Enberg 	int nr;
472ae1fae34SPekka Enberg 
4735d67eaf6SPekka Enberg 	/*
4745d67eaf6SPekka Enberg 	 * See Documentation/x86/boot.txt for details on the bzImage on-disk and
4755d67eaf6SPekka Enberg 	 * memory layout.
4765d67eaf6SPekka Enberg 	 */
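
	/*
	 * Rough sketch of what the code below does with that layout (the
	 * values come from the boot_params header read next; BZ_KERNEL_START
	 * and the BOOT_* constants are defined above or in the included
	 * headers):
	 *
	 *	on disk                              in guest memory
	 *	(hdr.setup_sects + 1) * 512 bytes -> BOOT_LOADER_SELECTOR:BOOT_LOADER_IP
	 *	protected-mode kernel image       -> BZ_KERNEL_START
	 *	kernel_cmdline (from the caller)  -> BOOT_CMDLINE_OFFSET
	 */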
4775d67eaf6SPekka Enberg 
4782065a6f7SCyrill Gorcunov 	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
479009b0758SPekka Enberg 		die_perror("lseek");
480009b0758SPekka Enberg 
4810b62d2bbSPekka Enberg 	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
4822346d461SPekka Enberg 		return false;
483ae1fae34SPekka Enberg 
4840b62d2bbSPekka Enberg 	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
4857fb218bdSPekka Enberg 		return false;
486ae1fae34SPekka Enberg 
4870ea58e5bSPekka Enberg 	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
4880b62d2bbSPekka Enberg 		die("Kernel is too old");
489ad681038SCyrill Gorcunov 
4902065a6f7SCyrill Gorcunov 	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
491e93ab78aSPekka Enberg 		die_perror("lseek");
492e93ab78aSPekka Enberg 
4934cf542bbSCyrill Gorcunov 	if (!boot.hdr.setup_sects)
4944cf542bbSCyrill Gorcunov 		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
49510943d14SPekka Enberg 	setup_sects = boot.hdr.setup_sects + 1;
49610943d14SPekka Enberg 
49754d4a626SPekka Enberg 	setup_size = setup_sects << 9;
49843835ac9SSasha Levin 	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
499ae1fae34SPekka Enberg 
5002065a6f7SCyrill Gorcunov 	/* copy setup.bin to mem */
5012065a6f7SCyrill Gorcunov 	if (read(fd_kernel, p, setup_size) != setup_size)
5027fb218bdSPekka Enberg 		die_perror("read");
5037fb218bdSPekka Enberg 
5042065a6f7SCyrill Gorcunov 	/* copy vmlinux.bin to BZ_KERNEL_START */
50543835ac9SSasha Levin 	p = guest_flat_to_host(kvm, BZ_KERNEL_START);
506ae1fae34SPekka Enberg 
5072065a6f7SCyrill Gorcunov 	while ((nr = read(fd_kernel, p, 65536)) > 0)
508ae1fae34SPekka Enberg 		p += nr;
509ae1fae34SPekka Enberg 
51043835ac9SSasha Levin 	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
511debcfac0SCyrill Gorcunov 	if (kernel_cmdline) {
512debcfac0SCyrill Gorcunov 		cmdline_size = strlen(kernel_cmdline) + 1;
513debcfac0SCyrill Gorcunov 		if (cmdline_size > boot.hdr.cmdline_size)
514debcfac0SCyrill Gorcunov 			cmdline_size = boot.hdr.cmdline_size;
515ad681038SCyrill Gorcunov 
5162dd4a4edSCyrill Gorcunov 		memset(p, 0, boot.hdr.cmdline_size);
5172dd4a4edSCyrill Gorcunov 		memcpy(p, kernel_cmdline, cmdline_size - 1);
518debcfac0SCyrill Gorcunov 	}
519debcfac0SCyrill Gorcunov 
52043835ac9SSasha Levin 	kern_boot	= guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);
521a43f6460SCyrill Gorcunov 
522b9271160SPekka Enberg 	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
523b9271160SPekka Enberg 	kern_boot->hdr.type_of_loader	= 0xff;
524b9271160SPekka Enberg 	kern_boot->hdr.heap_end_ptr	= 0xfe00;
525b9271160SPekka Enberg 	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;
52653861c74SJohn Floren 	kern_boot->hdr.vid_mode		= vidmode;
527a43f6460SCyrill Gorcunov 
5282065a6f7SCyrill Gorcunov 	/*
5292065a6f7SCyrill Gorcunov 	 * Read initrd image into guest memory
5302065a6f7SCyrill Gorcunov 	 */
5312065a6f7SCyrill Gorcunov 	if (fd_initrd >= 0) {
5322065a6f7SCyrill Gorcunov 		struct stat initrd_stat;
5332065a6f7SCyrill Gorcunov 		unsigned long addr;
5342065a6f7SCyrill Gorcunov 
5352065a6f7SCyrill Gorcunov 		if (fstat(fd_initrd, &initrd_stat))
5362065a6f7SCyrill Gorcunov 			die_perror("fstat");
5372065a6f7SCyrill Gorcunov 
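		/*
		 * Place the initrd as high as possible: start from
		 * initrd_addr_max rounded down to 1MB and walk down in 1MB
		 * steps until the image fits below the end of RAM while
		 * staying above the kernel at BZ_KERNEL_START.
		 */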
5382065a6f7SCyrill Gorcunov 		addr = boot.hdr.initrd_addr_max & ~0xfffff;
5392065a6f7SCyrill Gorcunov 		for (;;) {
5402065a6f7SCyrill Gorcunov 			if (addr < BZ_KERNEL_START)
5412065a6f7SCyrill Gorcunov 				die("Not enough memory for initrd");
54243835ac9SSasha Levin 			else if (addr < (kvm->ram_size - initrd_stat.st_size))
5432065a6f7SCyrill Gorcunov 				break;
5442065a6f7SCyrill Gorcunov 			addr -= 0x100000;
5452065a6f7SCyrill Gorcunov 		}
5462065a6f7SCyrill Gorcunov 
54743835ac9SSasha Levin 		p = guest_flat_to_host(kvm, addr);
5482065a6f7SCyrill Gorcunov 		nr = read(fd_initrd, p, initrd_stat.st_size);
5492065a6f7SCyrill Gorcunov 		if (nr != initrd_stat.st_size)
5502065a6f7SCyrill Gorcunov 			die("Failed to read initrd");
5512065a6f7SCyrill Gorcunov 
5522065a6f7SCyrill Gorcunov 		kern_boot->hdr.ramdisk_image	= addr;
5532065a6f7SCyrill Gorcunov 		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
5542065a6f7SCyrill Gorcunov 	}
5552065a6f7SCyrill Gorcunov 
55643835ac9SSasha Levin 	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
557edc8a14dSPekka Enberg 	/*
558edc8a14dSPekka Enberg 	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
559edc8a14dSPekka Enberg 	 * Documentation/x86/boot.txt for details.
560edc8a14dSPekka Enberg 	 */
56143835ac9SSasha Levin 	kvm->boot_ip		= BOOT_LOADER_IP + 0x200;
56243835ac9SSasha Levin 	kvm->boot_sp		= BOOT_LOADER_SP;
563edc8a14dSPekka Enberg 
5647fb218bdSPekka Enberg 	return true;
565ae1fae34SPekka Enberg }
566ae1fae34SPekka Enberg 
56772811558SPekka Enberg /* RFC 1952 */
56872811558SPekka Enberg #define GZIP_ID1		0x1f
56972811558SPekka Enberg #define GZIP_ID2		0x8b
57072811558SPekka Enberg 
57172811558SPekka Enberg static bool initrd_check(int fd)
57272811558SPekka Enberg {
57372811558SPekka Enberg 	unsigned char id[2];
57472811558SPekka Enberg 
57572811558SPekka Enberg 	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
57672811558SPekka Enberg 		return false;
57772811558SPekka Enberg 
57872811558SPekka Enberg 	if (lseek(fd, 0, SEEK_SET) < 0)
57972811558SPekka Enberg 		die_perror("lseek");
58072811558SPekka Enberg 
58172811558SPekka Enberg 	return id[0] == GZIP_ID1 && id[1] == GZIP_ID2;
58272811558SPekka Enberg }
58372811558SPekka Enberg 
5846d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
58553861c74SJohn Floren 		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
586ae1fae34SPekka Enberg {
5877fb218bdSPekka Enberg 	bool ret;
5882065a6f7SCyrill Gorcunov 	int fd_kernel = -1, fd_initrd = -1;
589ae1fae34SPekka Enberg 
5902065a6f7SCyrill Gorcunov 	fd_kernel = open(kernel_filename, O_RDONLY);
5912065a6f7SCyrill Gorcunov 	if (fd_kernel < 0)
5920b62d2bbSPekka Enberg 		die("Unable to open kernel %s", kernel_filename);
593ae1fae34SPekka Enberg 
5942065a6f7SCyrill Gorcunov 	if (initrd_filename) {
5952065a6f7SCyrill Gorcunov 		fd_initrd = open(initrd_filename, O_RDONLY);
5962065a6f7SCyrill Gorcunov 		if (fd_initrd < 0)
5970b62d2bbSPekka Enberg 			die("Unable to open initrd %s", initrd_filename);
59872811558SPekka Enberg 
59972811558SPekka Enberg 		if (!initrd_check(fd_initrd))
60072811558SPekka Enberg 			die("%s is not an initrd", initrd_filename);
6012065a6f7SCyrill Gorcunov 	}
6022065a6f7SCyrill Gorcunov 
60353861c74SJohn Floren 	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);
60428972750SCyrill Gorcunov 
60528972750SCyrill Gorcunov 	if (initrd_filename)
60628972750SCyrill Gorcunov 		close(fd_initrd);
60728972750SCyrill Gorcunov 
608009b0758SPekka Enberg 	if (ret)
609009b0758SPekka Enberg 		goto found_kernel;
610ae1fae34SPekka Enberg 
6114542f276SCyrill Gorcunov 	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);
6120b62d2bbSPekka Enberg 
6132065a6f7SCyrill Gorcunov 	ret = load_flat_binary(kvm, fd_kernel);
614009b0758SPekka Enberg 	if (ret)
615009b0758SPekka Enberg 		goto found_kernel;
616009b0758SPekka Enberg 
6175a6ac675SSasha Levin 	close(fd_kernel);
6185a6ac675SSasha Levin 
619009b0758SPekka Enberg 	die("%s is not a valid bzImage or flat binary", kernel_filename);
620009b0758SPekka Enberg 
621009b0758SPekka Enberg found_kernel:
6225a6ac675SSasha Levin 	close(fd_kernel);
6235a6ac675SSasha Levin 
624ae1fae34SPekka Enberg 	return ret;
625ae1fae34SPekka Enberg }
626ae1fae34SPekka Enberg 
627b3594ec7SCyrill Gorcunov /**
628b3594ec7SCyrill Gorcunov  * kvm__setup_bios - inject BIOS into guest system memory
62943835ac9SSasha Levin  * @kvm - guest system descriptor
630b3594ec7SCyrill Gorcunov  *
631b3594ec7SCyrill Gorcunov  * This function is the main routine where we poke guest memory
632b3594ec7SCyrill Gorcunov  * and install the BIOS there.
633b3594ec7SCyrill Gorcunov  */
63443835ac9SSasha Levin void kvm__setup_bios(struct kvm *kvm)
6352f3976eeSPekka Enberg {
636b3594ec7SCyrill Gorcunov 	/* standard minimal configuration */
63743835ac9SSasha Levin 	setup_bios(kvm);
6382f3976eeSPekka Enberg 
639b3594ec7SCyrill Gorcunov 	/* FIXME: SMP, ACPI and friends here */
6400c7c14a7SCyrill Gorcunov 
6410c7c14a7SCyrill Gorcunov 	/* MP table */
64243835ac9SSasha Levin 	mptable_setup(kvm, kvm->nrcpus);
6432f3976eeSPekka Enberg }
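
/*
 * Illustrative call sequence (a sketch only; the real call sites are in the
 * launcher code outside this file and their exact order may differ):
 *
 *	kvm = kvm__init("/dev/kvm", ram_size, guest_name);
 *	kvm__load_kernel(kvm, kernel_filename, initrd_filename, cmdline, vidmode);
 *	kvm__setup_bios(kvm);
 *	kvm__start_timer(kvm);
 */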
6442f3976eeSPekka Enberg 
645ce79f1caSPekka Enberg #define TIMER_INTERVAL_NS 1000000	/* 1 msec */
646ce79f1caSPekka Enberg 
647ce79f1caSPekka Enberg /*
648ce79f1caSPekka Enberg  * This function sets up a timer that's used to inject interrupts from the
649ce79f1caSPekka Enberg  * userspace hypervisor into the guest at periodic intervals. Please note
650ce79f1caSPekka Enberg  * that the clock interrupt, for example, is not handled here.
651ce79f1caSPekka Enberg  */
65243835ac9SSasha Levin void kvm__start_timer(struct kvm *kvm)
653ce79f1caSPekka Enberg {
654ce79f1caSPekka Enberg 	struct itimerspec its;
655ce79f1caSPekka Enberg 	struct sigevent sev;
656ce79f1caSPekka Enberg 
657ce79f1caSPekka Enberg 	memset(&sev, 0, sizeof(struct sigevent));
658ce79f1caSPekka Enberg 	sev.sigev_value.sival_int	= 0;
659c7828731SSasha Levin 	sev.sigev_notify		= SIGEV_THREAD_ID;
660ce79f1caSPekka Enberg 	sev.sigev_signo			= SIGALRM;
661c7828731SSasha Levin 	sev._sigev_un._tid		= syscall(__NR_gettid);
662ce79f1caSPekka Enberg 
66343835ac9SSasha Levin 	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
664ce79f1caSPekka Enberg 		die("timer_create()");
665ce79f1caSPekka Enberg 
666ce79f1caSPekka Enberg 	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
667ce79f1caSPekka Enberg 	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
668ce79f1caSPekka Enberg 	its.it_interval.tv_sec		= its.it_value.tv_sec;
669ce79f1caSPekka Enberg 	its.it_interval.tv_nsec		= its.it_value.tv_nsec;
670ce79f1caSPekka Enberg 
67143835ac9SSasha Levin 	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
672ce79f1caSPekka Enberg 		die("timer_settime()");
673ce79f1caSPekka Enberg }
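
/*
 * Worked example: with TIMER_INTERVAL_NS = 1000000 the code above arms the
 * timer with it_value = it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
 * i.e. the first SIGALRM arrives after 1 msec and then repeats every 1 msec,
 * delivered to the thread that called kvm__start_timer() (SIGEV_THREAD_ID
 * with the gettid() result filled in above).
 */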
674ce79f1caSPekka Enberg 
67543835ac9SSasha Levin void kvm__stop_timer(struct kvm *kvm)
676fbfe68b7SSasha Levin {
67743835ac9SSasha Levin 	if (kvm->timerid)
67843835ac9SSasha Levin 		if (timer_delete(kvm->timerid) < 0)
679fbfe68b7SSasha Levin 			die("timer_delete()");
680fbfe68b7SSasha Levin 
68143835ac9SSasha Levin 	kvm->timerid = 0;
682fbfe68b7SSasha Levin }
683fbfe68b7SSasha Levin 
68443835ac9SSasha Levin void kvm__irq_line(struct kvm *kvm, int irq, int level)
6858b1ff07eSPekka Enberg {
6868b1ff07eSPekka Enberg 	struct kvm_irq_level irq_level;
6878b1ff07eSPekka Enberg 
6888b1ff07eSPekka Enberg 	irq_level	= (struct kvm_irq_level) {
6898b1ff07eSPekka Enberg 		{
6908b1ff07eSPekka Enberg 			.irq		= irq,
6918b1ff07eSPekka Enberg 		},
6928b1ff07eSPekka Enberg 		.level		= level,
6938b1ff07eSPekka Enberg 	};
6948b1ff07eSPekka Enberg 
69543835ac9SSasha Levin 	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
6968b1ff07eSPekka Enberg 		die_perror("KVM_IRQ_LINE failed");
6978b1ff07eSPekka Enberg }
6988b1ff07eSPekka Enberg 
699bfaed61cSSasha Levin void kvm__irq_trigger(struct kvm *kvm, int irq)
700bfaed61cSSasha Levin {
701bfaed61cSSasha Levin 	kvm__irq_line(kvm, irq, 1);
702bfaed61cSSasha Levin 	kvm__irq_line(kvm, irq, 0);
703bfaed61cSSasha Levin }
704bfaed61cSSasha Levin 
70543835ac9SSasha Levin void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
706090f898eSCyrill Gorcunov {
707090f898eSCyrill Gorcunov 	unsigned char *p;
708090f898eSCyrill Gorcunov 	unsigned long n;
709090f898eSCyrill Gorcunov 
710090f898eSCyrill Gorcunov 	size &= ~7; /* round down to a multiple of 8 */
711090f898eSCyrill Gorcunov 	if (!size)
712090f898eSCyrill Gorcunov 		return;
713090f898eSCyrill Gorcunov 
71443835ac9SSasha Levin 	p = guest_flat_to_host(kvm, addr);
715090f898eSCyrill Gorcunov 
71648cf3877SPekka Enberg 	for (n = 0; n < size; n += 8) {
71743835ac9SSasha Levin 		if (!host_ptr_in_ram(kvm, p + n))
71848cf3877SPekka Enberg 			break;
71948cf3877SPekka Enberg 
720090f898eSCyrill Gorcunov 		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
721090f898eSCyrill Gorcunov 			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
722090f898eSCyrill Gorcunov 				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
723090f898eSCyrill Gorcunov 	}
72448cf3877SPekka Enberg }
7254298ddadSSasha Levin 
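/*
 * Pause protocol (as implemented below): kvm__pause() takes pause_lock, sends
 * SIGKVMPAUSE to every vcpu thread and waits on an eventfd until all of them
 * have called kvm__notify_paused(). Each notifying vcpu then blocks trying to
 * take pause_lock, so the guest stays stopped until kvm__continue() releases
 * the lock.
 */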
7264298ddadSSasha Levin void kvm__pause(void)
7274298ddadSSasha Levin {
7284298ddadSSasha Levin 	int i, paused_vcpus = 0;
7294298ddadSSasha Levin 
7304298ddadSSasha Levin 	/* Check if the guest is running */
7314298ddadSSasha Levin 	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
7324298ddadSSasha Levin 		return;
7334298ddadSSasha Levin 
7344298ddadSSasha Levin 	mutex_lock(&pause_lock);
7354298ddadSSasha Levin 
7364298ddadSSasha Levin 	pause_event = eventfd(0, 0);
7374298ddadSSasha Levin 	if (pause_event < 0)
7384298ddadSSasha Levin 		die("Failed creating pause notification event");
7394298ddadSSasha Levin 	for (i = 0; i < kvm->nrcpus; i++)
7404298ddadSSasha Levin 		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);
7414298ddadSSasha Levin 
7424298ddadSSasha Levin 	while (paused_vcpus < kvm->nrcpus) {
7434298ddadSSasha Levin 		u64 cur_read;
7444298ddadSSasha Levin 
7454298ddadSSasha Levin 		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
7464298ddadSSasha Levin 			die("Failed reading pause event");
7474298ddadSSasha Levin 		paused_vcpus += cur_read;
7484298ddadSSasha Levin 	}
7494298ddadSSasha Levin 	close(pause_event);
7504298ddadSSasha Levin }
7514298ddadSSasha Levin 
7524298ddadSSasha Levin void kvm__continue(void)
7534298ddadSSasha Levin {
7544298ddadSSasha Levin 	/* Check if the guest is running */
7554298ddadSSasha Levin 	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
7564298ddadSSasha Levin 		return;
7574298ddadSSasha Levin 
7584298ddadSSasha Levin 	mutex_unlock(&pause_lock);
7594298ddadSSasha Levin }
7604298ddadSSasha Levin 
7614298ddadSSasha Levin void kvm__notify_paused(void)
7624298ddadSSasha Levin {
7634298ddadSSasha Levin 	u64 p = 1;
7644298ddadSSasha Levin 
7654298ddadSSasha Levin 	if (write(pause_event, &p, sizeof(p)) < 0)
7664298ddadSSasha Levin 		die("Failed notifying of paused VCPU.");
7674298ddadSSasha Levin 
7684298ddadSSasha Levin 	mutex_lock(&pause_lock);
7694298ddadSSasha Levin 	mutex_unlock(&pause_lock);
7704298ddadSSasha Levin }
771