xref: /kvmtool/kvm.c (revision 2bc995fbc0ba33821527d78b51746c24ccd79e44)
1ae1fae34SPekka Enberg #include "kvm/kvm.h"
2ae1fae34SPekka Enberg 
3b3594ec7SCyrill Gorcunov #include "kvm/boot-protocol.h"
472811558SPekka Enberg #include "kvm/cpufeature.h"
572811558SPekka Enberg #include "kvm/read-write.h"
672811558SPekka Enberg #include "kvm/interrupt.h"
70c7c14a7SCyrill Gorcunov #include "kvm/mptable.h"
872811558SPekka Enberg #include "kvm/util.h"
94298ddadSSasha Levin #include "kvm/mutex.h"
104298ddadSSasha Levin #include "kvm/kvm-cpu.h"
114b1addaeSSasha Levin #include "kvm/kvm-ipc.h"
12eda03319SPekka Enberg 
136c7d8514SPekka Enberg #include <linux/kvm.h>
14f5ab5f67SPekka Enberg 
15f5ab5f67SPekka Enberg #include <asm/bootparam.h>
16f5ab5f67SPekka Enberg 
174b1addaeSSasha Levin #include <sys/un.h>
184b1addaeSSasha Levin #include <sys/types.h>
194b1addaeSSasha Levin #include <sys/socket.h>
20ae1fae34SPekka Enberg #include <sys/ioctl.h>
211f9cff23SPekka Enberg #include <sys/mman.h>
22ce79f1caSPekka Enberg #include <sys/stat.h>
232da26a59SPekka Enberg #include <stdbool.h>
246e5e8b8dSPekka Enberg #include <assert.h>
2506e41eeaSPekka Enberg #include <limits.h>
26ce79f1caSPekka Enberg #include <signal.h>
27f5ab5f67SPekka Enberg #include <stdarg.h>
28b8f6afcdSPekka Enberg #include <stdlib.h>
29f5ab5f67SPekka Enberg #include <string.h>
300d1f17ecSPekka Enberg #include <unistd.h>
311f9cff23SPekka Enberg #include <stdio.h>
32b8f6afcdSPekka Enberg #include <fcntl.h>
33ce79f1caSPekka Enberg #include <time.h>
344298ddadSSasha Levin #include <sys/eventfd.h>
35c7828731SSasha Levin #include <asm/unistd.h>
3663bc8503SSasha Levin #include <dirent.h>
37b8f6afcdSPekka Enberg 
38ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
390d1f17ecSPekka Enberg 
40ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = {
41ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
42ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
43ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
44ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
45ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
46ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
47ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
48ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
49ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
50ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
51ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
52ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
53ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
54ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
55ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
56ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
57ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
58ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
599b1fb1c3SPekka Enberg };
609b1fb1c3SPekka Enberg 
6155e19624SCyrill Gorcunov #define DEFINE_KVM_EXT(ext)		\
6255e19624SCyrill Gorcunov 	.name = #ext,			\
6355e19624SCyrill Gorcunov 	.code = ext
6455e19624SCyrill Gorcunov 
6555e19624SCyrill Gorcunov struct {
6655e19624SCyrill Gorcunov 	const char *name;
6755e19624SCyrill Gorcunov 	int code;
6855e19624SCyrill Gorcunov } kvm_req_ext[] = {
6955e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
7055e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
7155e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
7255e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
7355e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
7455e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
757c0ec28fSCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
7655e19624SCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
77d38ad31aSCyrill Gorcunov 	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
7855e19624SCyrill Gorcunov };
7955e19624SCyrill Gorcunov 
804298ddadSSasha Levin extern struct kvm *kvm;
814298ddadSSasha Levin extern struct kvm_cpu *kvm_cpus[KVM_NR_CPUS];
824298ddadSSasha Levin static int pause_event;
834298ddadSSasha Levin static DEFINE_MUTEX(pause_lock);
844298ddadSSasha Levin 
859667701cSPekka Enberg static char kvm_dir[PATH_MAX];
869667701cSPekka Enberg 
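/*
 * Create the KVM tool state directory and canonicalize its path into
 * kvm_dir.
 */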
879667701cSPekka Enberg static void set_dir(const char *fmt, va_list args)
889667701cSPekka Enberg {
89dd188f9fSPekka Enberg 	char tmp[PATH_MAX];
90dd188f9fSPekka Enberg 
91dd188f9fSPekka Enberg 	vsnprintf(tmp, sizeof(tmp), fmt, args);
92dd188f9fSPekka Enberg 
93*2bc995fbSPekka Enberg 	mkdir(tmp, 0777);
94*2bc995fbSPekka Enberg 
95dd188f9fSPekka Enberg 	if (!realpath(tmp, kvm_dir))
96dd188f9fSPekka Enberg 		die("Unable to set KVM tool directory");
979667701cSPekka Enberg }
989667701cSPekka Enberg 
999667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...)
1009667701cSPekka Enberg {
1019667701cSPekka Enberg 	va_list args;
1029667701cSPekka Enberg 
1039667701cSPekka Enberg 	va_start(args, fmt);
1049667701cSPekka Enberg 	set_dir(fmt, args);
1059667701cSPekka Enberg 	va_end(args);
1069667701cSPekka Enberg }
1079667701cSPekka Enberg 
1089667701cSPekka Enberg const char *kvm__get_dir(void)
1099667701cSPekka Enberg {
1109667701cSPekka Enberg 	return kvm_dir;
1119667701cSPekka Enberg }
1129667701cSPekka Enberg 
11343835ac9SSasha Levin static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
114b8f6afcdSPekka Enberg {
11528fa19c0SPekka Enberg 	int ret;
116b8f6afcdSPekka Enberg 
11743835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
1184076b041SPekka Enberg 	if (ret < 0)
1194076b041SPekka Enberg 		return false;
1204076b041SPekka Enberg 
1214076b041SPekka Enberg 	return ret;
1224076b041SPekka Enberg }
1234076b041SPekka Enberg 
12443835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm)
12555e19624SCyrill Gorcunov {
12655e19624SCyrill Gorcunov 	unsigned int i;
12755e19624SCyrill Gorcunov 
12855e19624SCyrill Gorcunov 	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
12943835ac9SSasha Levin 		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
1304542f276SCyrill Gorcunov 			pr_error("Unsupported KVM extension detected: %s",
13155e19624SCyrill Gorcunov 				kvm_req_ext[i].name);
13255e19624SCyrill Gorcunov 			return -1;
13355e19624SCyrill Gorcunov 		}
13455e19624SCyrill Gorcunov 	}
13555e19624SCyrill Gorcunov 
13655e19624SCyrill Gorcunov 	return 0;
13755e19624SCyrill Gorcunov }
13855e19624SCyrill Gorcunov 
1394076b041SPekka Enberg static struct kvm *kvm__new(void)
1404076b041SPekka Enberg {
14143835ac9SSasha Levin 	struct kvm *kvm = calloc(1, sizeof *kvm);
1424076b041SPekka Enberg 
14343835ac9SSasha Levin 	if (!kvm)
1444076b041SPekka Enberg 		die("out of memory");
1454076b041SPekka Enberg 
14643835ac9SSasha Levin 	return kvm;
1474076b041SPekka Enberg }
1484076b041SPekka Enberg 
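/*
 * Create a UNIX control socket at <kvm dir>/<name>.sock and start listening
 * on it so that other kvm tool processes can talk to this instance over IPC.
 * Returns the listening fd, or -1 on failure.
 */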
1494b1addaeSSasha Levin static int kvm__create_socket(struct kvm *kvm)
1505358b0e6SSasha Levin {
1514b1addaeSSasha Levin 	char full_name[PATH_MAX];
1524b1addaeSSasha Levin 	int s;
1534b1addaeSSasha Levin 	struct sockaddr_un local;
1544b1addaeSSasha Levin 	int len, r;
1555358b0e6SSasha Levin 
1565358b0e6SSasha Levin 	if (!kvm->name)
1574b1addaeSSasha Levin 		return -1;
1585358b0e6SSasha Levin 
1599667701cSPekka Enberg 	sprintf(full_name, "%s", kvm__get_dir());
160c71efd96SSasha Levin 	mkdir(full_name, 0777);
1614b1addaeSSasha Levin 	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), kvm->name);
162fa0022d2SSasha Levin 	if (access(full_name, F_OK) == 0)
163fa0022d2SSasha Levin 		die("Socket file %s already exists", full_name);
164fa0022d2SSasha Levin 
1654b1addaeSSasha Levin 	s = socket(AF_UNIX, SOCK_STREAM, 0);
1664b1addaeSSasha Levin 	if (s < 0)
1674b1addaeSSasha Levin 		return s;
1684b1addaeSSasha Levin 	local.sun_family = AF_UNIX;
1694b1addaeSSasha Levin 	strcpy(local.sun_path, full_name);
1704b1addaeSSasha Levin 	unlink(local.sun_path);
1714b1addaeSSasha Levin 	len = strlen(local.sun_path) + sizeof(local.sun_family);
1724b1addaeSSasha Levin 	r = bind(s, (struct sockaddr *)&local, len);
1734b1addaeSSasha Levin 	if (r < 0)
1744b1addaeSSasha Levin 		goto fail;
1754b1addaeSSasha Levin 
1764b1addaeSSasha Levin 	r = listen(s, 5);
1774b1addaeSSasha Levin 	if (r < 0)
1784b1addaeSSasha Levin 		goto fail;
1794b1addaeSSasha Levin 
1804b1addaeSSasha Levin 	return s;
1814b1addaeSSasha Levin 
1824b1addaeSSasha Levin fail:
1834b1addaeSSasha Levin 	close(s);
1844b1addaeSSasha Levin 	return -1;
1855358b0e6SSasha Levin }
1865358b0e6SSasha Levin 
1874b1addaeSSasha Levin void kvm__remove_socket(const char *name)
1885358b0e6SSasha Levin {
1895358b0e6SSasha Levin 	char full_name[PATH_MAX];
1905358b0e6SSasha Levin 
1914b1addaeSSasha Levin 	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), name);
1925358b0e6SSasha Levin 	unlink(full_name);
1935358b0e6SSasha Levin }
1945358b0e6SSasha Levin 
1954b1addaeSSasha Levin int kvm__get_sock_by_instance(const char *name)
1965358b0e6SSasha Levin {
1974b1addaeSSasha Levin 	int s, len, r;
1984b1addaeSSasha Levin 	char sock_file[PATH_MAX];
1994b1addaeSSasha Levin 	struct sockaddr_un local;
2005358b0e6SSasha Levin 
2014b1addaeSSasha Levin 	sprintf(sock_file, "%s/%s.sock", kvm__get_dir(), name);
2024b1addaeSSasha Levin 	s = socket(AF_UNIX, SOCK_STREAM, 0);
2035358b0e6SSasha Levin 
2044b1addaeSSasha Levin 	local.sun_family = AF_UNIX;
2054b1addaeSSasha Levin 	strcpy(local.sun_path, sock_file);
2064b1addaeSSasha Levin 	len = strlen(local.sun_path) + sizeof(local.sun_family);
2075358b0e6SSasha Levin 
2084b1addaeSSasha Levin 	r = connect(s, (struct sockaddr *)&local, len);
2094b1addaeSSasha Levin 	if (r < 0)
2104b1addaeSSasha Levin 		die("Failed connecting to instance");
2115358b0e6SSasha Levin 
2124b1addaeSSasha Levin 	return s;
2135358b0e6SSasha Levin }
2145358b0e6SSasha Levin 
2154b1addaeSSasha Levin int kvm__enumerate_instances(int (*callback)(const char *name, int fd))
21663bc8503SSasha Levin {
21763bc8503SSasha Levin 	char full_name[PATH_MAX];
2184b1addaeSSasha Levin 	int sock;
21963bc8503SSasha Levin 	DIR *dir;
22063bc8503SSasha Levin 	struct dirent entry, *result;
221886af5f2SLiming Wang 	int ret = 0;
22263bc8503SSasha Levin 
2239667701cSPekka Enberg 	sprintf(full_name, "%s", kvm__get_dir());
22463bc8503SSasha Levin 	dir = opendir(full_name);
22563bc8503SSasha Levin 
226f2e556f3SKonstantin Khlebnikov 	while (dir != NULL) {
22763bc8503SSasha Levin 		readdir_r(dir, &entry, &result);
22863bc8503SSasha Levin 		if (result == NULL)
22963bc8503SSasha Levin 			break;
2304b1addaeSSasha Levin 		if (entry.d_type == DT_SOCK) {
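			/* Strip the ".sock" suffix to recover the instance name */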
2314b1addaeSSasha Levin 			entry.d_name[strlen(entry.d_name)-5] = 0;
2324b1addaeSSasha Levin 			sock = kvm__get_sock_by_instance(entry.d_name);
2334b1addaeSSasha Levin 			ret = callback(entry.d_name, sock);
2344b1addaeSSasha Levin 			close(sock);
235886af5f2SLiming Wang 			if (ret < 0)
236886af5f2SLiming Wang 				break;
23763bc8503SSasha Levin 		}
23863bc8503SSasha Levin 	}
23963bc8503SSasha Levin 
2401a0ef251SSasha Levin 	if (dir != NULL)
2401a0ef251SSasha Levin 		closedir(dir);
2411a0ef251SSasha Levin 
242886af5f2SLiming Wang 	return ret;
24363bc8503SSasha Levin }
24463bc8503SSasha Levin 
24543835ac9SSasha Levin void kvm__delete(struct kvm *kvm)
2469ef4c68eSPekka Enberg {
24743835ac9SSasha Levin 	kvm__stop_timer(kvm);
248fbfe68b7SSasha Levin 
24943835ac9SSasha Levin 	munmap(kvm->ram_start, kvm->ram_size);
250c733c80bSSasha Levin 	kvm_ipc__stop();
2514b1addaeSSasha Levin 	kvm__remove_socket(kvm->name);
25243835ac9SSasha Levin 	free(kvm);
2539ef4c68eSPekka Enberg }
2549ef4c68eSPekka Enberg 
255c78b8713SAsias He static bool kvm__cpu_supports_vm(void)
256c78b8713SAsias He {
257c78b8713SAsias He 	struct cpuid_regs regs;
2583fdf659dSSasha Levin 	u32 eax_base;
259831fbf23SPekka Enberg 	int feature;
260c78b8713SAsias He 
261c78b8713SAsias He 	regs	= (struct cpuid_regs) {
262831fbf23SPekka Enberg 		.eax		= 0x00,
263c78b8713SAsias He 	};
264c78b8713SAsias He 	host_cpuid(&regs);
265c78b8713SAsias He 
266ae87afbfSCyrill Gorcunov 	switch (regs.ebx) {
267ae87afbfSCyrill Gorcunov 	case CPUID_VENDOR_INTEL_1:
268831fbf23SPekka Enberg 		eax_base	= 0x00;
269831fbf23SPekka Enberg 		feature		= KVM__X86_FEATURE_VMX;
270ae87afbfSCyrill Gorcunov 		break;
27134649df9SPekka Enberg 
272ae87afbfSCyrill Gorcunov 	case CPUID_VENDOR_AMD_1:
273831fbf23SPekka Enberg 		eax_base	= 0x80000000;
274831fbf23SPekka Enberg 		feature		= KVM__X86_FEATURE_SVM;
275ae87afbfSCyrill Gorcunov 		break;
27634649df9SPekka Enberg 
27734649df9SPekka Enberg 	default:
27834649df9SPekka Enberg 		return false;
279ae87afbfSCyrill Gorcunov 	}
280ae87afbfSCyrill Gorcunov 
281831fbf23SPekka Enberg 	regs	= (struct cpuid_regs) {
282831fbf23SPekka Enberg 		.eax		= eax_base,
283831fbf23SPekka Enberg 	};
284831fbf23SPekka Enberg 	host_cpuid(&regs);
285831fbf23SPekka Enberg 
286831fbf23SPekka Enberg 	if (regs.eax < eax_base + 0x01)
287831fbf23SPekka Enberg 		return false;
288831fbf23SPekka Enberg 
289831fbf23SPekka Enberg 	regs	= (struct cpuid_regs) {
290831fbf23SPekka Enberg 		.eax		= eax_base + 0x01
291831fbf23SPekka Enberg 	};
292831fbf23SPekka Enberg 	host_cpuid(&regs);
293831fbf23SPekka Enberg 
294831fbf23SPekka Enberg 	return regs.ecx & (1 << feature);
295c78b8713SAsias He }
296c78b8713SAsias He 
29796feb589SPekka Enberg /*
29896feb589SPekka Enberg  * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
29996feb589SPekka Enberg  * memory regions to it. Therefore, be careful if you use this function for
30096feb589SPekka Enberg  * registering memory regions for emulating hardware.
30196feb589SPekka Enberg  */
30296feb589SPekka Enberg void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
3034076b041SPekka Enberg {
3042b0e3342SPekka Enberg 	struct kvm_userspace_memory_region mem;
305839051d9SSasha Levin 	int ret;
306839051d9SSasha Levin 
307839051d9SSasha Levin 	mem = (struct kvm_userspace_memory_region) {
30896feb589SPekka Enberg 		.slot			= kvm->mem_slots++,
309874467f8SSasha Levin 		.guest_phys_addr	= guest_phys,
310874467f8SSasha Levin 		.memory_size		= size,
311c4acb611SIngo Molnar 		.userspace_addr		= (unsigned long)userspace_addr,
312839051d9SSasha Levin 	};
313839051d9SSasha Levin 
314874467f8SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
315839051d9SSasha Levin 	if (ret < 0)
316839051d9SSasha Levin 		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
317839051d9SSasha Levin }
318839051d9SSasha Levin 
319874467f8SSasha Levin /*
320874467f8SSasha Levin  * Allocating a RAM size bigger than 4GB requires us to leave a gap
321874467f8SSasha Levin  * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
322874467f8SSasha Levin  * devices (see documentation of e820_setup_gap() for details).
323874467f8SSasha Levin  *
324874467f8SSasha Levin  * If we're required to initialize RAM bigger than 4GB, we will create
325874467f8SSasha Levin  * a gap between 0xe0000000 and 0x100000000 in the guest physical address space.
326874467f8SSasha Levin  */
327874467f8SSasha Levin 
32843835ac9SSasha Levin void kvm__init_ram(struct kvm *kvm)
329874467f8SSasha Levin {
330874467f8SSasha Levin 	u64	phys_start, phys_size;
331874467f8SSasha Levin 	void	*host_mem;
332874467f8SSasha Levin 
33343835ac9SSasha Levin 	if (kvm->ram_size < KVM_32BIT_GAP_START) {
334874467f8SSasha Levin 		/* Use a single block of RAM for 32bit RAM */
335874467f8SSasha Levin 
336874467f8SSasha Levin 		phys_start = 0;
33743835ac9SSasha Levin 		phys_size  = kvm->ram_size;
33843835ac9SSasha Levin 		host_mem   = kvm->ram_start;
339874467f8SSasha Levin 
34096feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
341874467f8SSasha Levin 	} else {
342874467f8SSasha Levin 		/* First RAM range from zero to the PCI gap: */
343874467f8SSasha Levin 
344874467f8SSasha Levin 		phys_start = 0;
345874467f8SSasha Levin 		phys_size  = KVM_32BIT_GAP_START;
34643835ac9SSasha Levin 		host_mem   = kvm->ram_start;
347874467f8SSasha Levin 
34896feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
349874467f8SSasha Levin 
350874467f8SSasha Levin 		/* Second RAM range from 4GB to the end of RAM: */
351874467f8SSasha Levin 
352874467f8SSasha Levin 		phys_start = 0x100000000ULL;
35343835ac9SSasha Levin 		phys_size  = kvm->ram_size - phys_size;
35443835ac9SSasha Levin 		host_mem   = kvm->ram_start + phys_start;
355874467f8SSasha Levin 
35696feb589SPekka Enberg 		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
357874467f8SSasha Levin 	}
358874467f8SSasha Levin }
359874467f8SSasha Levin 
3608259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm)
361384922b3SPekka Enberg {
362384922b3SPekka Enberg 	int ret;
363384922b3SPekka Enberg 
36443835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
3658259b8ccSSasha Levin 	if (ret <= 0)
366384922b3SPekka Enberg 		die_perror("KVM_CAP_NR_VCPUS");
367384922b3SPekka Enberg 
368384922b3SPekka Enberg 	return ret;
369384922b3SPekka Enberg }
370384922b3SPekka Enberg 
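/* IPC handler: reply to a KVM_IPC_PID request with our process ID. */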
3714b1addaeSSasha Levin static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
3724b1addaeSSasha Levin {
3734b1addaeSSasha Levin 	pid_t pid = getpid();
3744b1addaeSSasha Levin 	int r = 0;
3754b1addaeSSasha Levin 
3764b1addaeSSasha Levin 	if (type == KVM_IPC_PID)
3774b1addaeSSasha Levin 		r = write(fd, &pid, sizeof(pid));
3784b1addaeSSasha Levin 
3794b1addaeSSasha Levin 	if (r < 0)
3804b1addaeSSasha Levin 		pr_warning("Failed sending PID");
3814b1addaeSSasha Levin }
3824b1addaeSSasha Levin 
3838259b8ccSSasha Levin /*
3848259b8ccSSasha Levin  * The following hack should be removed once 'x86: Raise the hard
3858259b8ccSSasha Levin  * VCPU count limit' makes its way into the mainline.
3868259b8ccSSasha Levin  */
3878259b8ccSSasha Levin #ifndef KVM_CAP_MAX_VCPUS
3888259b8ccSSasha Levin #define KVM_CAP_MAX_VCPUS 66
3898259b8ccSSasha Levin #endif
3908259b8ccSSasha Levin 
3918259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm)
3928259b8ccSSasha Levin {
3938259b8ccSSasha Levin 	int ret;
3948259b8ccSSasha Levin 
3958259b8ccSSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
3968259b8ccSSasha Levin 	if (ret <= 0)
3978259b8ccSSasha Levin 		ret = kvm__recommended_cpus(kvm);
3988259b8ccSSasha Levin 
3998259b8ccSSasha Levin 	return ret;
4008259b8ccSSasha Levin }
4018259b8ccSSasha Levin 
4025358b0e6SSasha Levin struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
403839051d9SSasha Levin {
4049687927dSAsias He 	struct kvm_pit_config pit_config = { .flags = 0, };
40543835ac9SSasha Levin 	struct kvm *kvm;
4064076b041SPekka Enberg 	int ret;
4074076b041SPekka Enberg 
408c78b8713SAsias He 	if (!kvm__cpu_supports_vm())
409c78b8713SAsias He 		die("Your CPU does not support hardware virtualization");
410c78b8713SAsias He 
41143835ac9SSasha Levin 	kvm = kvm__new();
4124076b041SPekka Enberg 
41343835ac9SSasha Levin 	kvm->sys_fd = open(kvm_dev, O_RDWR);
41443835ac9SSasha Levin 	if (kvm->sys_fd < 0) {
4156d7c36ceSPekka Enberg 		if (errno == ENOENT)
416e907b83fSPekka Enberg 			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
417f8334800SIngo Molnar 		if (errno == ENODEV)
418f8334800SIngo Molnar 			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);
4196d7c36ceSPekka Enberg 
420f8334800SIngo Molnar 		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
421f8334800SIngo Molnar 		perror(NULL);
422f8334800SIngo Molnar 		exit(1);
4236d7c36ceSPekka Enberg 	}
424b8f6afcdSPekka Enberg 
42543835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
4266c7d8514SPekka Enberg 	if (ret != KVM_API_VERSION)
427f5ab5f67SPekka Enberg 		die_perror("KVM_API_VERSION ioctl");
4286c7d8514SPekka Enberg 
42943835ac9SSasha Levin 	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
43043835ac9SSasha Levin 	if (kvm->vm_fd < 0)
431f5ab5f67SPekka Enberg 		die_perror("KVM_CREATE_VM ioctl");
43228fa19c0SPekka Enberg 
43343835ac9SSasha Levin 	if (kvm__check_extensions(kvm))
43455e19624SCyrill Gorcunov 		die("A required KVM extension is not supported by the OS");
4359687927dSAsias He 
43643835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
4379687927dSAsias He 	if (ret < 0)
4389687927dSAsias He 		die_perror("KVM_SET_TSS_ADDR ioctl");
4399687927dSAsias He 
44043835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
4419687927dSAsias He 	if (ret < 0)
4429687927dSAsias He 		die_perror("KVM_CREATE_PIT2 ioctl");
4439687927dSAsias He 
44443835ac9SSasha Levin 	kvm->ram_size		= ram_size;
4450d1f17ecSPekka Enberg 
44643835ac9SSasha Levin 	if (kvm->ram_size < KVM_32BIT_GAP_START) {
44737c34ca8SSasha Levin 		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
448874467f8SSasha Levin 	} else {
44937c34ca8SSasha Levin 		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
45043835ac9SSasha Levin 		if (kvm->ram_start != MAP_FAILED) {
451874467f8SSasha Levin 			/*
452874467f8SSasha Levin 			 * We mprotect the gap (see kvm__init_ram() for details) with PROT_NONE so
453874467f8SSasha Levin 			 * that if we accidentally write to it, we will know.
454874467f8SSasha Levin 			 */
45543835ac9SSasha Levin 			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
456874467f8SSasha Levin 		}
457874467f8SSasha Levin 	}
45843835ac9SSasha Levin 	if (kvm->ram_start == MAP_FAILED)
4590d1f17ecSPekka Enberg 		die("out of memory");
4600d1f17ecSPekka Enberg 
4617f4f39a4SSasha Levin 	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);
4627f4f39a4SSasha Levin 
46343835ac9SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
464895c2fefSPekka Enberg 	if (ret < 0)
4659687927dSAsias He 		die_perror("KVM_CREATE_IRQCHIP ioctl");
4669687927dSAsias He 
4675358b0e6SSasha Levin 	kvm->name = name;
4685358b0e6SSasha Levin 
4694b1addaeSSasha Levin 	kvm_ipc__start(kvm__create_socket(kvm));
4704b1addaeSSasha Levin 	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
47143835ac9SSasha Levin 	return kvm;
4724076b041SPekka Enberg }
4734076b041SPekka Enberg 
4745f6772b8SCyrill Gorcunov #define BOOT_LOADER_SELECTOR	0x1000
475b08e9ec4SPekka Enberg #define BOOT_LOADER_IP		0x0000
476dbdb74c2SPekka Enberg #define BOOT_LOADER_SP		0x8000
4772dd4a4edSCyrill Gorcunov #define BOOT_CMDLINE_OFFSET	0x20000
4782dd4a4edSCyrill Gorcunov 
4799a4ecdc5SPekka Enberg #define BOOT_PROTOCOL_REQUIRED	0x206
480a43f6460SCyrill Gorcunov #define LOAD_HIGH		0x01
481009b0758SPekka Enberg 
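/*
 * Load a flat binary image at BOOT_LOADER_SELECTOR:BOOT_LOADER_IP and set
 * up the boot registers so the guest starts executing it in real mode.
 */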
48243835ac9SSasha Levin static int load_flat_binary(struct kvm *kvm, int fd)
483009b0758SPekka Enberg {
484009b0758SPekka Enberg 	void *p;
485009b0758SPekka Enberg 	int nr;
486009b0758SPekka Enberg 
487009b0758SPekka Enberg 	if (lseek(fd, 0, SEEK_SET) < 0)
488009b0758SPekka Enberg 		die_perror("lseek");
489009b0758SPekka Enberg 
49043835ac9SSasha Levin 	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
491009b0758SPekka Enberg 
492009b0758SPekka Enberg 	while ((nr = read(fd, p, 65536)) > 0)
493009b0758SPekka Enberg 		p += nr;
494009b0758SPekka Enberg 
49543835ac9SSasha Levin 	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
49643835ac9SSasha Levin 	kvm->boot_ip		= BOOT_LOADER_IP;
49743835ac9SSasha Levin 	kvm->boot_sp		= BOOT_LOADER_SP;
498edc8a14dSPekka Enberg 
4997fb218bdSPekka Enberg 	return true;
500009b0758SPekka Enberg }
501009b0758SPekka Enberg 
502ae1fae34SPekka Enberg static const char *BZIMAGE_MAGIC	= "HdrS";
503ae1fae34SPekka Enberg 
50443835ac9SSasha Levin static bool load_bzimage(struct kvm *kvm, int fd_kernel,
50553861c74SJohn Floren 			int fd_initrd, const char *kernel_cmdline, u16 vidmode)
506ae1fae34SPekka Enberg {
507b9271160SPekka Enberg 	struct boot_params *kern_boot;
5084b62331fSPekka Enberg 	unsigned long setup_sects;
509b9271160SPekka Enberg 	struct boot_params boot;
5102dd4a4edSCyrill Gorcunov 	size_t cmdline_size;
5117fb218bdSPekka Enberg 	ssize_t setup_size;
51222489bb0SCyrill Gorcunov 	void *p;
513ae1fae34SPekka Enberg 	int nr;
514ae1fae34SPekka Enberg 
5155d67eaf6SPekka Enberg 	/*
5165d67eaf6SPekka Enberg 	 * See Documentation/x86/boot.txt for details on the bzImage on-disk and
5175d67eaf6SPekka Enberg 	 * memory layout.
5185d67eaf6SPekka Enberg 	 */
5195d67eaf6SPekka Enberg 
5202065a6f7SCyrill Gorcunov 	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
521009b0758SPekka Enberg 		die_perror("lseek");
522009b0758SPekka Enberg 
5230b62d2bbSPekka Enberg 	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
5242346d461SPekka Enberg 		return false;
525ae1fae34SPekka Enberg 
5260b62d2bbSPekka Enberg 	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
5277fb218bdSPekka Enberg 		return false;
528ae1fae34SPekka Enberg 
5290ea58e5bSPekka Enberg 	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
5300b62d2bbSPekka Enberg 		die("Kernel is too old: boot protocol 2.06 or later is required");
531ad681038SCyrill Gorcunov 
5322065a6f7SCyrill Gorcunov 	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
533e93ab78aSPekka Enberg 		die_perror("lseek");
534e93ab78aSPekka Enberg 
5354cf542bbSCyrill Gorcunov 	if (!boot.hdr.setup_sects)
5364cf542bbSCyrill Gorcunov 		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
53710943d14SPekka Enberg 	setup_sects = boot.hdr.setup_sects + 1;
53810943d14SPekka Enberg 
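	/* setup_sects counts 512-byte sectors and includes the legacy boot sector */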
53954d4a626SPekka Enberg 	setup_size = setup_sects << 9;
54043835ac9SSasha Levin 	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
541ae1fae34SPekka Enberg 
5422065a6f7SCyrill Gorcunov 	/* Copy setup.bin into guest memory */
5432065a6f7SCyrill Gorcunov 	if (read(fd_kernel, p, setup_size) != setup_size)
5447fb218bdSPekka Enberg 		die_perror("read");
5457fb218bdSPekka Enberg 
5462065a6f7SCyrill Gorcunov 	/* Copy vmlinux.bin to BZ_KERNEL_START */
54743835ac9SSasha Levin 	p = guest_flat_to_host(kvm, BZ_KERNEL_START);
548ae1fae34SPekka Enberg 
5492065a6f7SCyrill Gorcunov 	while ((nr = read(fd_kernel, p, 65536)) > 0)
550ae1fae34SPekka Enberg 		p += nr;
551ae1fae34SPekka Enberg 
55243835ac9SSasha Levin 	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
553debcfac0SCyrill Gorcunov 	if (kernel_cmdline) {
554debcfac0SCyrill Gorcunov 		cmdline_size = strlen(kernel_cmdline) + 1;
555debcfac0SCyrill Gorcunov 		if (cmdline_size > boot.hdr.cmdline_size)
556debcfac0SCyrill Gorcunov 			cmdline_size = boot.hdr.cmdline_size;
557ad681038SCyrill Gorcunov 
5582dd4a4edSCyrill Gorcunov 		memset(p, 0, boot.hdr.cmdline_size);
5592dd4a4edSCyrill Gorcunov 		memcpy(p, kernel_cmdline, cmdline_size - 1);
560debcfac0SCyrill Gorcunov 	}
561debcfac0SCyrill Gorcunov 
56243835ac9SSasha Levin 	kern_boot	= guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);
563a43f6460SCyrill Gorcunov 
564b9271160SPekka Enberg 	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
565b9271160SPekka Enberg 	kern_boot->hdr.type_of_loader	= 0xff;
566b9271160SPekka Enberg 	kern_boot->hdr.heap_end_ptr	= 0xfe00;
567b9271160SPekka Enberg 	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;
56853861c74SJohn Floren 	kern_boot->hdr.vid_mode		= vidmode;
569a43f6460SCyrill Gorcunov 
5702065a6f7SCyrill Gorcunov 	/*
5712065a6f7SCyrill Gorcunov 	 * Read initrd image into guest memory
5722065a6f7SCyrill Gorcunov 	 */
5732065a6f7SCyrill Gorcunov 	if (fd_initrd >= 0) {
5742065a6f7SCyrill Gorcunov 		struct stat initrd_stat;
5752065a6f7SCyrill Gorcunov 		unsigned long addr;
5762065a6f7SCyrill Gorcunov 
5772065a6f7SCyrill Gorcunov 		if (fstat(fd_initrd, &initrd_stat))
5782065a6f7SCyrill Gorcunov 			die_perror("fstat");
5792065a6f7SCyrill Gorcunov 
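		/*
		 * Start at the highest address the kernel accepts for the initrd,
		 * rounded down to a 1MB boundary, and search downwards in 1MB
		 * steps for a slot that fits below the end of guest RAM.
		 */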
5802065a6f7SCyrill Gorcunov 		addr = boot.hdr.initrd_addr_max & ~0xfffff;
5812065a6f7SCyrill Gorcunov 		for (;;) {
5822065a6f7SCyrill Gorcunov 			if (addr < BZ_KERNEL_START)
5832065a6f7SCyrill Gorcunov 				die("Not enough memory for initrd");
58443835ac9SSasha Levin 			else if (addr < (kvm->ram_size - initrd_stat.st_size))
5852065a6f7SCyrill Gorcunov 				break;
5862065a6f7SCyrill Gorcunov 			addr -= 0x100000;
5872065a6f7SCyrill Gorcunov 		}
5882065a6f7SCyrill Gorcunov 
58943835ac9SSasha Levin 		p = guest_flat_to_host(kvm, addr);
5902065a6f7SCyrill Gorcunov 		nr = read(fd_initrd, p, initrd_stat.st_size);
5912065a6f7SCyrill Gorcunov 		if (nr != initrd_stat.st_size)
5922065a6f7SCyrill Gorcunov 			die("Failed to read initrd");
5932065a6f7SCyrill Gorcunov 
5942065a6f7SCyrill Gorcunov 		kern_boot->hdr.ramdisk_image	= addr;
5952065a6f7SCyrill Gorcunov 		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
5962065a6f7SCyrill Gorcunov 	}
5972065a6f7SCyrill Gorcunov 
59843835ac9SSasha Levin 	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
599edc8a14dSPekka Enberg 	/*
600edc8a14dSPekka Enberg 	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
601edc8a14dSPekka Enberg 	 * Documentation/x86/boot.txt for details.
602edc8a14dSPekka Enberg 	 */
60343835ac9SSasha Levin 	kvm->boot_ip		= BOOT_LOADER_IP + 0x200;
60443835ac9SSasha Levin 	kvm->boot_sp		= BOOT_LOADER_SP;
605edc8a14dSPekka Enberg 
6067fb218bdSPekka Enberg 	return true;
607ae1fae34SPekka Enberg }
608ae1fae34SPekka Enberg 
60972811558SPekka Enberg /* RFC 1952 */
61072811558SPekka Enberg #define GZIP_ID1		0x1f
61172811558SPekka Enberg #define GZIP_ID2		0x8b
61272811558SPekka Enberg 
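/* Sanity-check that the initrd image starts with the gzip magic bytes. */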
61372811558SPekka Enberg static bool initrd_check(int fd)
61472811558SPekka Enberg {
61572811558SPekka Enberg 	unsigned char id[2];
61672811558SPekka Enberg 
61772811558SPekka Enberg 	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
61872811558SPekka Enberg 		return false;
61972811558SPekka Enberg 
62072811558SPekka Enberg 	if (lseek(fd, 0, SEEK_SET) < 0)
62172811558SPekka Enberg 		die_perror("lseek");
62272811558SPekka Enberg 
62372811558SPekka Enberg 	return id[0] == GZIP_ID1 && id[1] == GZIP_ID2;
62472811558SPekka Enberg }
62572811558SPekka Enberg 
6266d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
62753861c74SJohn Floren 		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
628ae1fae34SPekka Enberg {
6297fb218bdSPekka Enberg 	bool ret;
6302065a6f7SCyrill Gorcunov 	int fd_kernel = -1, fd_initrd = -1;
631ae1fae34SPekka Enberg 
6322065a6f7SCyrill Gorcunov 	fd_kernel = open(kernel_filename, O_RDONLY);
6332065a6f7SCyrill Gorcunov 	if (fd_kernel < 0)
6340b62d2bbSPekka Enberg 		die("Unable to open kernel %s", kernel_filename);
635ae1fae34SPekka Enberg 
6362065a6f7SCyrill Gorcunov 	if (initrd_filename) {
6372065a6f7SCyrill Gorcunov 		fd_initrd = open(initrd_filename, O_RDONLY);
6382065a6f7SCyrill Gorcunov 		if (fd_initrd < 0)
6390b62d2bbSPekka Enberg 			die("Unable to open initrd %s", initrd_filename);
64072811558SPekka Enberg 
64172811558SPekka Enberg 		if (!initrd_check(fd_initrd))
64272811558SPekka Enberg 			die("%s is not an initrd", initrd_filename);
6432065a6f7SCyrill Gorcunov 	}
6442065a6f7SCyrill Gorcunov 
64553861c74SJohn Floren 	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);
64628972750SCyrill Gorcunov 
64728972750SCyrill Gorcunov 	if (initrd_filename)
64828972750SCyrill Gorcunov 		close(fd_initrd);
64928972750SCyrill Gorcunov 
650009b0758SPekka Enberg 	if (ret)
651009b0758SPekka Enberg 		goto found_kernel;
652ae1fae34SPekka Enberg 
6534542f276SCyrill Gorcunov 	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);
6540b62d2bbSPekka Enberg 
6552065a6f7SCyrill Gorcunov 	ret = load_flat_binary(kvm, fd_kernel);
656009b0758SPekka Enberg 	if (ret)
657009b0758SPekka Enberg 		goto found_kernel;
658009b0758SPekka Enberg 
6595a6ac675SSasha Levin 	close(fd_kernel);
6605a6ac675SSasha Levin 
661009b0758SPekka Enberg 	die("%s is not a valid bzImage or flat binary", kernel_filename);
662009b0758SPekka Enberg 
663009b0758SPekka Enberg found_kernel:
6645a6ac675SSasha Levin 	close(fd_kernel);
6655a6ac675SSasha Levin 
666ae1fae34SPekka Enberg 	return ret;
667ae1fae34SPekka Enberg }
668ae1fae34SPekka Enberg 
669b3594ec7SCyrill Gorcunov /**
670b3594ec7SCyrill Gorcunov  * kvm__setup_bios - inject BIOS into guest system memory
67143835ac9SSasha Levin  * @kvm - guest system descriptor
672b3594ec7SCyrill Gorcunov  *
673b3594ec7SCyrill Gorcunov  * This is the main routine where we poke guest memory
674b3594ec7SCyrill Gorcunov  * and install the BIOS there.
675b3594ec7SCyrill Gorcunov  */
67643835ac9SSasha Levin void kvm__setup_bios(struct kvm *kvm)
6772f3976eeSPekka Enberg {
678b3594ec7SCyrill Gorcunov 	/* standard minimal configuration */
67943835ac9SSasha Levin 	setup_bios(kvm);
6802f3976eeSPekka Enberg 
681b3594ec7SCyrill Gorcunov 	/* FIXME: SMP, ACPI and friends here */
6820c7c14a7SCyrill Gorcunov 
6830c7c14a7SCyrill Gorcunov 	/* MP table */
68443835ac9SSasha Levin 	mptable_setup(kvm, kvm->nrcpus);
6852f3976eeSPekka Enberg }
6862f3976eeSPekka Enberg 
687ce79f1caSPekka Enberg #define TIMER_INTERVAL_NS 1000000	/* 1 msec */
688ce79f1caSPekka Enberg 
689ce79f1caSPekka Enberg /*
690ce79f1caSPekka Enberg  * This function sets up a timer that's used to inject interrupts from the
691ce79f1caSPekka Enberg  * userspace hypervisor into the guest at periodic intervals. Note that
692ce79f1caSPekka Enberg  * the clock interrupt, for example, is not handled here.
693ce79f1caSPekka Enberg  */
69443835ac9SSasha Levin void kvm__start_timer(struct kvm *kvm)
695ce79f1caSPekka Enberg {
696ce79f1caSPekka Enberg 	struct itimerspec its;
697ce79f1caSPekka Enberg 	struct sigevent sev;
698ce79f1caSPekka Enberg 
699ce79f1caSPekka Enberg 	memset(&sev, 0, sizeof(struct sigevent));
700ce79f1caSPekka Enberg 	sev.sigev_value.sival_int	= 0;
701c7828731SSasha Levin 	sev.sigev_notify		= SIGEV_THREAD_ID;
702ce79f1caSPekka Enberg 	sev.sigev_signo			= SIGALRM;
703c7828731SSasha Levin 	sev._sigev_un._tid		= syscall(__NR_gettid);
704ce79f1caSPekka Enberg 
70543835ac9SSasha Levin 	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
706ce79f1caSPekka Enberg 		die("timer_create()");
707ce79f1caSPekka Enberg 
708ce79f1caSPekka Enberg 	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
709ce79f1caSPekka Enberg 	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
710ce79f1caSPekka Enberg 	its.it_interval.tv_sec		= its.it_value.tv_sec;
711ce79f1caSPekka Enberg 	its.it_interval.tv_nsec		= its.it_value.tv_nsec;
712ce79f1caSPekka Enberg 
71343835ac9SSasha Levin 	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
714ce79f1caSPekka Enberg 		die("timer_settime()");
715ce79f1caSPekka Enberg }
716ce79f1caSPekka Enberg 
71743835ac9SSasha Levin void kvm__stop_timer(struct kvm *kvm)
718fbfe68b7SSasha Levin {
71943835ac9SSasha Levin 	if (kvm->timerid)
72043835ac9SSasha Levin 		if (timer_delete(kvm->timerid) < 0)
721fbfe68b7SSasha Levin 			die("timer_delete()");
722fbfe68b7SSasha Levin 
72343835ac9SSasha Levin 	kvm->timerid = 0;
724fbfe68b7SSasha Levin }
725fbfe68b7SSasha Levin 
72643835ac9SSasha Levin void kvm__irq_line(struct kvm *kvm, int irq, int level)
7278b1ff07eSPekka Enberg {
7288b1ff07eSPekka Enberg 	struct kvm_irq_level irq_level;
7298b1ff07eSPekka Enberg 
7308b1ff07eSPekka Enberg 	irq_level	= (struct kvm_irq_level) {
7318b1ff07eSPekka Enberg 		{
7328b1ff07eSPekka Enberg 			.irq		= irq,
7338b1ff07eSPekka Enberg 		},
7348b1ff07eSPekka Enberg 		.level		= level,
7358b1ff07eSPekka Enberg 	};
7368b1ff07eSPekka Enberg 
73743835ac9SSasha Levin 	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
7388b1ff07eSPekka Enberg 		die_perror("KVM_IRQ_LINE failed");
7398b1ff07eSPekka Enberg }
7408b1ff07eSPekka Enberg 
741bfaed61cSSasha Levin void kvm__irq_trigger(struct kvm *kvm, int irq)
742bfaed61cSSasha Levin {
743bfaed61cSSasha Levin 	kvm__irq_line(kvm, irq, 1);
744bfaed61cSSasha Levin 	kvm__irq_line(kvm, irq, 0);
745bfaed61cSSasha Levin }
746bfaed61cSSasha Levin 
74743835ac9SSasha Levin void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
748090f898eSCyrill Gorcunov {
749090f898eSCyrill Gorcunov 	unsigned char *p;
750090f898eSCyrill Gorcunov 	unsigned long n;
751090f898eSCyrill Gorcunov 
752090f898eSCyrill Gorcunov 	size &= ~7; /* round down to a multiple of 8 */
753090f898eSCyrill Gorcunov 	if (!size)
754090f898eSCyrill Gorcunov 		return;
755090f898eSCyrill Gorcunov 
75643835ac9SSasha Levin 	p = guest_flat_to_host(kvm, addr);
757090f898eSCyrill Gorcunov 
75848cf3877SPekka Enberg 	for (n = 0; n < size; n += 8) {
75943835ac9SSasha Levin 		if (!host_ptr_in_ram(kvm, p + n))
76048cf3877SPekka Enberg 			break;
76148cf3877SPekka Enberg 
762090f898eSCyrill Gorcunov 		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
763090f898eSCyrill Gorcunov 			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
764090f898eSCyrill Gorcunov 				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
765090f898eSCyrill Gorcunov 	}
76648cf3877SPekka Enberg }
7674298ddadSSasha Levin 
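/*
 * Pause all VCPUs: take pause_lock, signal every VCPU thread with
 * SIGKVMPAUSE and wait on an eventfd until each one has acknowledged.
 * The VCPUs then block in kvm__notify_paused() until kvm__continue()
 * releases pause_lock.
 */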
7684298ddadSSasha Levin void kvm__pause(void)
7694298ddadSSasha Levin {
7704298ddadSSasha Levin 	int i, paused_vcpus = 0;
7714298ddadSSasha Levin 
7724298ddadSSasha Levin 	/* Check if the guest is running */
7734298ddadSSasha Levin 	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
7744298ddadSSasha Levin 		return;
7754298ddadSSasha Levin 
7764298ddadSSasha Levin 	mutex_lock(&pause_lock);
7774298ddadSSasha Levin 
7784298ddadSSasha Levin 	pause_event = eventfd(0, 0);
7794298ddadSSasha Levin 	if (pause_event < 0)
7804298ddadSSasha Levin 		die("Failed creating pause notification event");
7814298ddadSSasha Levin 	for (i = 0; i < kvm->nrcpus; i++)
7824298ddadSSasha Levin 		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);
7834298ddadSSasha Levin 
7844298ddadSSasha Levin 	while (paused_vcpus < kvm->nrcpus) {
7854298ddadSSasha Levin 		u64 cur_read;
7864298ddadSSasha Levin 
7874298ddadSSasha Levin 		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
7884298ddadSSasha Levin 			die("Failed reading pause event");
7894298ddadSSasha Levin 		paused_vcpus += cur_read;
7904298ddadSSasha Levin 	}
7914298ddadSSasha Levin 	close(pause_event);
7924298ddadSSasha Levin }
7934298ddadSSasha Levin 
7944298ddadSSasha Levin void kvm__continue(void)
7954298ddadSSasha Levin {
7964298ddadSSasha Levin 	/* Check if the guest is running */
7974298ddadSSasha Levin 	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
7984298ddadSSasha Levin 		return;
7994298ddadSSasha Levin 
8004298ddadSSasha Levin 	mutex_unlock(&pause_lock);
8014298ddadSSasha Levin }
8024298ddadSSasha Levin 
8034298ddadSSasha Levin void kvm__notify_paused(void)
8044298ddadSSasha Levin {
8054298ddadSSasha Levin 	u64 p = 1;
8064298ddadSSasha Levin 
8074298ddadSSasha Levin 	if (write(pause_event, &p, sizeof(p)) < 0)
8084298ddadSSasha Levin 		die("Failed notifying of paused VCPU.");
8094298ddadSSasha Levin 
8104298ddadSSasha Levin 	mutex_lock(&pause_lock);
8114298ddadSSasha Levin 	mutex_unlock(&pause_lock);
8124298ddadSSasha Levin }
813