xref: /kvmtool/kvm.c (revision 3d7f121c6c5d3fc74907796135ee0e6048e5761a)
#include "kvm/kvm.h"
#include "kvm/read-write.h"
#include "kvm/util.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"
#include "kvm/kvm-ipc.h"

#include <linux/kvm.h>

#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <dirent.h>

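/* Human-readable names for the KVM_EXIT_* codes, indexed by exit reason. */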
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
#ifdef CONFIG_PPC64
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL),
#endif
};

extern struct kvm *kvm;
extern struct kvm_cpu **kvm_cpus;
static int pause_event;
static DEFINE_MUTEX(pause_lock);
extern struct kvm_ext kvm_req_ext[];

static char kvm_dir[PATH_MAX];

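/*
 * Format the requested path, create it if necessary, and remember its
 * canonical form (with a trailing '/') in kvm_dir. Per-instance socket
 * files are kept under this directory.
 */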
static void set_dir(const char *fmt, va_list args)
{
	char tmp[PATH_MAX];

	vsnprintf(tmp, sizeof(tmp), fmt, args);

	mkdir(tmp, 0777);

	if (!realpath(tmp, kvm_dir))
		die("Unable to set KVM tool directory");

	strcat(kvm_dir, "/");
}

void kvm__set_dir(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	set_dir(fmt, args);
	va_end(args);
}

const char *kvm__get_dir(void)
{
	return kvm_dir;
}

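/*
 * Query a single KVM_CAP_* capability with the KVM_CHECK_EXTENSION ioctl.
 * Returns false when the ioctl fails or the extension is unsupported.
 */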
bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}

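/*
 * Walk the kvm_req_ext[] table and make sure every required extension is
 * available; returns the negated table index of the first missing one.
 */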
static int kvm__check_extensions(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; ; i++) {
		if (!kvm_req_ext[i].name)
			break;
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			pr_error("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			return (int)-i;
		}
	}

	return 0;
}

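/* Allocate a zeroed struct kvm or die trying. */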
static struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof *kvm);

	if (!kvm)
		die("out of memory");

	return kvm;
}

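/*
 * Create and listen on the UNIX domain socket "<kvm_dir>/<name>.sock" that
 * kvm_ipc uses for control requests. Returns the listening fd, or -1 on
 * failure.
 */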
static int kvm__create_socket(struct kvm *kvm)
{
	char full_name[PATH_MAX];
	struct sockaddr_un local;
	int s, len, r;

	if (!kvm->name)
		return -1;

	sprintf(full_name, "%s", kvm__get_dir());
	mkdir(full_name, 0777);
	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), kvm->name);
	if (access(full_name, F_OK) == 0)
		die("Socket file %s already exists", full_name);

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0)
		return s;
	local.sun_family = AF_UNIX;
	strcpy(local.sun_path, full_name);
	unlink(local.sun_path);
	len = strlen(local.sun_path) + sizeof(local.sun_family);
	r = bind(s, (struct sockaddr *)&local, len);
	if (r < 0)
		goto fail;

	r = listen(s, 5);
	if (r < 0)
		goto fail;

	return s;

fail:
	close(s);
	return -1;
}

void kvm__remove_socket(const char *name)
{
	char full_name[PATH_MAX];

	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), name);
	unlink(full_name);
}

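/*
 * Connect to the socket of a named instance. A leftover socket file whose
 * instance is gone (connect() fails with ECONNREFUSED) is unlinked and -1
 * is returned.
 */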
int kvm__get_sock_by_instance(const char *name)
{
	int s, len, r;
	char sock_file[PATH_MAX];
	struct sockaddr_un local;

	sprintf(sock_file, "%s/%s.sock", kvm__get_dir(), name);
	s = socket(AF_UNIX, SOCK_STREAM, 0);

	local.sun_family = AF_UNIX;
	strcpy(local.sun_path, sock_file);
	len = strlen(local.sun_path) + sizeof(local.sun_family);

	r = connect(s, (struct sockaddr *)&local, len);
	if (r < 0 && errno == ECONNREFUSED) {
		/* Clean up the ghost socket file */
		unlink(sock_file);
		return -1;
	} else if (r < 0) {
		die("Failed connecting to instance");
	}

	return s;
}

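/*
 * Iterate over every socket entry in the KVM tool directory and call
 * 'callback' with the instance name and a connected socket fd. Stops early
 * if the callback returns a negative value.
 */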
int kvm__enumerate_instances(int (*callback)(const char *name, int fd))
{
	char full_name[PATH_MAX];
	int sock;
	DIR *dir;
	struct dirent entry, *result;
	int ret = 0;

	sprintf(full_name, "%s", kvm__get_dir());
	dir = opendir(full_name);
	if (!dir)
		return -1;

	for (;;) {
		readdir_r(dir, &entry, &result);
		if (result == NULL)
			break;
		if (entry.d_type == DT_SOCK) {
			entry.d_name[strlen(entry.d_name)-5] = 0;
			sock = kvm__get_sock_by_instance(entry.d_name);
			if (sock < 0)
				continue;
			ret = callback(entry.d_name, sock);
			close(sock);
			if (ret < 0)
				break;
		}
	}

	closedir(dir);

	return ret;
}

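/*
 * Tear down a VM: stop the periodic timer, release guest RAM, shut down the
 * IPC socket and free the kvm structure.
 */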
void kvm__delete(struct kvm *kvm)
{
	kvm__stop_timer(kvm);

	munmap(kvm->ram_start, kvm->ram_size);
	kvm_ipc__stop();
	kvm__remove_socket(kvm->name);
	free(kvm);
}

/*
 * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
 * memory regions to it. Therefore, be careful if you use this function for
 * registering memory regions for emulating hardware.
 */
void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem;
	int ret;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= kvm->mem_slots++,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}

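/* Ask KVM for the recommended VCPU count (KVM_CAP_NR_VCPUS). */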
int kvm__recommended_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret <= 0)
		/*
		 * api.txt states that if KVM_CAP_NR_VCPUS does not exist,
		 * assume 4.
		 */
		return 4;

	return ret;
}

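/* kvm_ipc handler: reply to a KVM_IPC_PID request with our PID. */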
static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
{
	pid_t pid = getpid();
	int r = 0;

	if (type == KVM_IPC_PID)
		r = write(fd, &pid, sizeof(pid));

	if (r < 0)
		pr_warning("Failed sending PID");
}

/*
 * The following hack should be removed once 'x86: Raise the hard
 * VCPU count limit' makes its way into the mainline.
 */
#ifndef KVM_CAP_MAX_VCPUS
#define KVM_CAP_MAX_VCPUS 66
#endif

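/*
 * Return the hard VCPU limit (KVM_CAP_MAX_VCPUS), falling back to the
 * recommended count if the capability is not reported.
 */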
int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	if (ret <= 0)
		ret = kvm__recommended_cpus(kvm);

	return ret;
}

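/*
 * Open the KVM device node, sanity-check the API version, create the VM,
 * verify the required extensions, let the architecture code set up guest
 * memory, and finally bring up the IPC socket.
 */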
struct kvm *kvm__init(const char *kvm_dev, const char *hugetlbfs_path, u64 ram_size, const char *name)
{
	struct kvm *kvm;
	int ret;

	if (!kvm__arch_cpu_supports_vm())
		die("Your CPU does not support hardware virtualization");

	kvm = kvm__new();

	kvm->sys_fd = open(kvm_dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
		if (errno == ENODEV)
			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);

		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
		perror(NULL);
		exit(1);
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(kvm))
		die("A required KVM extension is not supported by the OS");

	kvm__arch_init(kvm, kvm_dev, hugetlbfs_path, ram_size, name);

	kvm->name = name;

	kvm_ipc__start(kvm__create_socket(kvm));
	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
	return kvm;
}

/* RFC 1952 */
#define GZIP_ID1		0x1f
#define GZIP_ID2		0x8b
#define CPIO_MAGIC		"0707"
/* initrd may be gzipped, or a plain cpio */
static bool initrd_check(int fd)
{
	unsigned char id[4];

	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
		return false;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) ||
		!memcmp(id, CPIO_MAGIC, 4);
}

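/*
 * Load the guest kernel: try it as a bzImage first, then fall back to a
 * flat binary. An optional initrd is sanity-checked with initrd_check()
 * before being handed to the loader.
 */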
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);

		if (!initrd_check(fd_initrd))
			die("%s is not an initrd", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);

	if (ret)
		goto found_kernel;

	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel, fd_initrd, kernel_cmdline);

	if (ret)
		goto found_kernel;

	if (initrd_filename)
		close(fd_initrd);
	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	if (initrd_filename)
		close(fd_initrd);
	close(fd_kernel);

	return ret;
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that's used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Please note
 * that the clock interrupt, for example, is not handled here.
 */
void kvm__start_timer(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_THREAD_ID;
	sev.sigev_signo			= SIGALRM;
	sev._sigev_un._tid		= syscall(__NR_gettid);

	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
		die("timer_create()");

	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec		= its.it_value.tv_sec;
	its.it_interval.tv_nsec		= its.it_value.tv_nsec;

	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
		die("timer_settime()");
}

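/* Disarm and delete the periodic timer created by kvm__start_timer(). */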
void kvm__stop_timer(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;
}

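/*
 * Dump 'size' bytes of guest memory starting at guest physical address
 * 'addr', eight bytes per line, stopping at the end of guest RAM.
 */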
void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}

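/*
 * Pause all VCPUs: take pause_lock, signal every VCPU thread with
 * SIGKVMPAUSE and wait on an eventfd until each of them has called
 * kvm__notify_paused(). The lock is held until kvm__continue().
 */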
void kvm__pause(void)
{
	int i, paused_vcpus = 0;

	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_lock(&pause_lock);

	pause_event = eventfd(0, 0);
	if (pause_event < 0)
		die("Failed creating pause notification event");
	for (i = 0; i < kvm->nrcpus; i++)
		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);

	while (paused_vcpus < kvm->nrcpus) {
		u64 cur_read;

		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
			die("Failed reading pause event");
		paused_vcpus += cur_read;
	}
	close(pause_event);
}

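/* Resume a paused guest by releasing pause_lock. */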
void kvm__continue(void)
{
	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_unlock(&pause_lock);
}

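/*
 * Report one paused VCPU to kvm__pause() via the eventfd, then block on
 * pause_lock until kvm__continue() releases it.
 */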
void kvm__notify_paused(void)
{
	u64 p = 1;

	if (write(pause_event, &p, sizeof(p)) < 0)
		die("Failed notifying of paused VCPU.");

	mutex_lock(&pause_lock);
	mutex_unlock(&pause_lock);
}
525