/* xref: /kvmtool/kvm.c (revision 5f5b01446117c865c2ecf31d96eec29578400bf5) */
#include "kvm/kvm.h"
#include "kvm/read-write.h"
#include "kvm/util.h"
#include "kvm/strbuf.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"
#include "kvm/kvm-ipc.h"

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/list.h>
#include <linux/err.h>

#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <dirent.h>

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

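/* Human-readable names for KVM exit reasons, indexed by KVM_EXIT_* code. */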
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
#ifdef CONFIG_PPC64
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL),
#endif
};

static int pause_event;
static DEFINE_MUTEX(pause_lock);
extern struct kvm_ext kvm_req_ext[];

static char kvm_dir[PATH_MAX];

static int set_dir(const char *fmt, va_list args)
{
	char tmp[PATH_MAX];

	vsnprintf(tmp, sizeof(tmp), fmt, args);

	mkdir(tmp, 0777);

	if (!realpath(tmp, kvm_dir))
		return -errno;

	strcat(kvm_dir, "/");

	return 0;
}

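/*
 * Set the directory used for kvmtool's runtime state from a printf-style
 * format string; the directory is created if missing and stored with a
 * trailing '/' appended.
 */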
void kvm__set_dir(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	set_dir(fmt, args);
	va_end(args);
}

const char *kvm__get_dir(void)
{
	return kvm_dir;
}

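/*
 * Query a single capability via KVM_CHECK_EXTENSION on the /dev/kvm fd.
 * Returns false if the ioctl fails, otherwise the value the kernel reports
 * for the extension.
 */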
bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}

static int kvm__check_extensions(struct kvm *kvm)
{
	int i;

	for (i = 0; ; i++) {
		if (!kvm_req_ext[i].name)
			break;
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			pr_err("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			return -i;
		}
	}

	return 0;
}

struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof(*kvm));
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm->sys_fd = -1;
	kvm->vm_fd = -1;

	return kvm;
}

int kvm__exit(struct kvm *kvm)
{
	struct kvm_mem_bank *bank, *tmp;

	kvm__arch_delete_ram(kvm);

	list_for_each_entry_safe(bank, tmp, &kvm->mem_banks, list) {
		list_del(&bank->list);
		free(bank);
	}

	free(kvm);
	return 0;
}
core_exit(kvm__exit);

/*
 * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
 * memory regions to it. Therefore, be careful if you use this function for
 * registering memory regions for emulating hardware.
 */
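/*
 * Illustrative call (hypothetical guest address and size): back 64 KiB of
 * guest RAM at guest physical 0x100000 with an anonymous mapping:
 *
 *	void *host = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (host != MAP_FAILED)
 *		kvm__register_mem(kvm, 0x100000, 0x10000, host);
 */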
int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem;
	struct kvm_mem_bank *bank;
	int ret;

	bank = malloc(sizeof(*bank));
	if (!bank)
		return -ENOMEM;

	INIT_LIST_HEAD(&bank->list);
	bank->guest_phys_addr		= guest_phys;
	bank->host_addr			= userspace_addr;
	bank->size			= size;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= kvm->mem_slots++,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		return -errno;

	list_add(&bank->list, &kvm->mem_banks);
	return 0;
}

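/*
 * Translate a guest physical address into a host virtual address by walking
 * the registered memory banks. Returns NULL (after warning) if the address
 * is not backed by guest memory.
 */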
void *guest_flat_to_host(struct kvm *kvm, u64 offset)
{
	struct kvm_mem_bank *bank;

	list_for_each_entry(bank, &kvm->mem_banks, list) {
		u64 bank_start = bank->guest_phys_addr;
		u64 bank_end = bank_start + bank->size;

		if (offset >= bank_start && offset < bank_end)
			return bank->host_addr + (offset - bank_start);
	}

	pr_warning("unable to translate guest address 0x%llx to host",
			(unsigned long long)offset);
	return NULL;
}

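/*
 * Reverse translation: map a host virtual pointer back to its guest physical
 * address, or return 0 if the pointer falls outside every registered bank.
 */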
u64 host_to_guest_flat(struct kvm *kvm, void *ptr)
{
	struct kvm_mem_bank *bank;

	list_for_each_entry(bank, &kvm->mem_banks, list) {
		void *bank_start = bank->host_addr;
		void *bank_end = bank_start + bank->size;

		if (ptr >= bank_start && ptr < bank_end)
			return bank->guest_phys_addr + (ptr - bank_start);
	}

	pr_warning("unable to translate host address %p to guest", ptr);
	return 0;
}

int kvm__recommended_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret <= 0)
		/*
		 * api.txt states that if KVM_CAP_NR_VCPUS does not exist,
		 * assume 4.
		 */
		return 4;

	return ret;
}

/*
 * The following hack should be removed once 'x86: Raise the hard
 * VCPU count limit' makes its way into the mainline.
 */
#ifndef KVM_CAP_MAX_VCPUS
#define KVM_CAP_MAX_VCPUS 66
#endif

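/*
 * Hard limit on VCPUs: prefer KVM_CAP_MAX_VCPUS and fall back to the
 * recommended count when the capability is not reported.
 */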
int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	if (ret <= 0)
		ret = kvm__recommended_cpus(kvm);

	return ret;
}

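/*
 * Bring up the VM: open the KVM device, verify the API version, create the
 * VM fd, check the architecture's required extensions, set up guest RAM and
 * then load either a kernel (plus optional initrd) or a firmware image.
 */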
int kvm__init(struct kvm *kvm)
{
	int ret;

	if (!kvm__arch_cpu_supports_vm()) {
		pr_err("Your CPU does not support hardware virtualization");
		ret = -ENOSYS;
		goto err;
	}

	kvm->sys_fd = open(kvm->cfg.dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM "
			       "enabled and that the KVM modules are loaded.", kvm->cfg.dev);
		else if (errno == ENODEV)
			pr_err("'%s' KVM driver not available.\n  # (If the KVM "
			       "module is loaded then 'dmesg' may offer further clues "
			       "about the failure.)", kvm->cfg.dev);
		else
			pr_err("Could not open %s: ", kvm->cfg.dev);

		ret = -errno;
		goto err_free;
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION) {
		pr_err("KVM_API_VERSION ioctl");
		ret = -errno;
		goto err_sys_fd;
	}

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0) {
		ret = kvm->vm_fd;
		goto err_sys_fd;
	}

	if (kvm__check_extensions(kvm)) {
		pr_err("A required KVM extension is not supported by the OS");
		ret = -ENOSYS;
		goto err_vm_fd;
	}

	kvm__arch_init(kvm, kvm->cfg.hugetlbfs_path, kvm->cfg.ram_size);

	INIT_LIST_HEAD(&kvm->mem_banks);
	kvm__init_ram(kvm);

	if (!kvm->cfg.firmware_filename) {
		if (!kvm__load_kernel(kvm, kvm->cfg.kernel_filename,
				kvm->cfg.initrd_filename, kvm->cfg.real_cmdline))
			die("unable to load kernel %s", kvm->cfg.kernel_filename);
	}

	if (kvm->cfg.firmware_filename) {
		if (!kvm__load_firmware(kvm, kvm->cfg.firmware_filename))
			die("unable to load firmware image %s: %s", kvm->cfg.firmware_filename, strerror(errno));
	} else {
		ret = kvm__arch_setup_firmware(kvm);
		if (ret < 0)
			die("kvm__arch_setup_firmware() failed with error %d\n", ret);
	}

	return 0;

err_vm_fd:
	close(kvm->vm_fd);
err_sys_fd:
	close(kvm->sys_fd);
err_free:
	free(kvm);
err:
	return ret;
}
core_init(kvm__init);

/* RFC 1952 */
#define GZIP_ID1		0x1f
#define GZIP_ID2		0x8b
#define CPIO_MAGIC		"0707"
/* initrd may be gzipped, or a plain cpio */
static bool initrd_check(int fd)
{
	unsigned char id[4];

	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
		return false;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) ||
		!memcmp(id, CPIO_MAGIC, 4);
}

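/*
 * Try the kernel image first as a bzImage and then as a flat binary; dies if
 * neither loader accepts it. The optional initrd is sanity-checked for a
 * gzip or cpio signature before being handed to the loaders.
 */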
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);

		if (!initrd_check(fd_initrd))
			die("%s is not an initrd", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);

	if (ret)
		goto found_kernel;

	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel, fd_initrd, kernel_cmdline);

	if (ret)
		goto found_kernel;

	if (initrd_filename)
		close(fd_initrd);
	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	if (initrd_filename)
		close(fd_initrd);
	close(fd_kernel);

	return ret;
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that is used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Note that the
 * clock interrupt, for example, is not handled here.
 */
int kvm_timer__init(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;
	int r;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_THREAD_ID;
	sev.sigev_signo			= SIGALRM;
	sev.sigev_value.sival_ptr	= kvm;
	sev._sigev_un._tid		= syscall(__NR_gettid);

	r = timer_create(CLOCK_REALTIME, &sev, &kvm->timerid);
	if (r < 0)
		return r;

	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec		= its.it_value.tv_sec;
	its.it_interval.tv_nsec		= its.it_value.tv_nsec;

	r = timer_settime(kvm->timerid, 0, &its, NULL);
	if (r < 0) {
		timer_delete(kvm->timerid);
		return r;
	}

	return 0;
}
firmware_init(kvm_timer__init);

int kvm_timer__exit(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;

	return 0;
}
firmware_exit(kvm_timer__exit);

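/*
 * Hex-dump guest memory to stdout, eight bytes per line, starting at guest
 * physical 'addr' and stopping early if the range runs past guest RAM.
 */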
void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* round down to a multiple of 8 */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}

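/*
 * Pause protocol: take pause_lock, signal every VCPU thread with SIGKVMPAUSE
 * and wait on an eventfd until each one has acknowledged via
 * kvm__notify_paused(). The VCPUs stay blocked on pause_lock until
 * kvm__continue() drops it.
 */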
void kvm__pause(struct kvm *kvm)
{
	int i, paused_vcpus = 0;

	/* Check if the guest is running */
	if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0)
		return;

	mutex_lock(&pause_lock);

	pause_event = eventfd(0, 0);
	if (pause_event < 0)
		die("Failed creating pause notification event");
	for (i = 0; i < kvm->nrcpus; i++)
		pthread_kill(kvm->cpus[i]->thread, SIGKVMPAUSE);

	while (paused_vcpus < kvm->nrcpus) {
		u64 cur_read;

		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
			die("Failed reading pause event");
		paused_vcpus += cur_read;
	}
	close(pause_event);
}

void kvm__continue(struct kvm *kvm)
{
	/* Check if the guest is running */
	if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0)
		return;

	mutex_unlock(&pause_lock);
}

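/*
 * Called from a VCPU thread once it has stopped: acknowledge the pause on
 * the eventfd, then block on pause_lock until kvm__continue() releases it.
 */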
void kvm__notify_paused(void)
{
	u64 p = 1;

	if (write(pause_event, &p, sizeof(p)) < 0)
		die("Failed notifying of paused VCPU.");

	mutex_lock(&pause_lock);
	mutex_unlock(&pause_lock);
}