xref: /kvmtool/kvm.c (revision 4346fd8f1ef04040775965c8476be61ad8e15643)
1ae1fae34SPekka Enberg #include "kvm/kvm.h"
272811558SPekka Enberg #include "kvm/read-write.h"
372811558SPekka Enberg #include "kvm/util.h"
424ed52dbSCyrill Gorcunov #include "kvm/strbuf.h"
54298ddadSSasha Levin #include "kvm/mutex.h"
64298ddadSSasha Levin #include "kvm/kvm-cpu.h"
74b1addaeSSasha Levin #include "kvm/kvm-ipc.h"
8eda03319SPekka Enberg 
96c7d8514SPekka Enberg #include <linux/kvm.h>
10495fbd4eSSasha Levin #include <linux/err.h>
11f5ab5f67SPekka Enberg 
124b1addaeSSasha Levin #include <sys/un.h>
13e2e876c2SMatt Evans #include <sys/stat.h>
144b1addaeSSasha Levin #include <sys/types.h>
154b1addaeSSasha Levin #include <sys/socket.h>
16ae1fae34SPekka Enberg #include <sys/ioctl.h>
171f9cff23SPekka Enberg #include <sys/mman.h>
182da26a59SPekka Enberg #include <stdbool.h>
1906e41eeaSPekka Enberg #include <limits.h>
20ce79f1caSPekka Enberg #include <signal.h>
21f5ab5f67SPekka Enberg #include <stdarg.h>
22b8f6afcdSPekka Enberg #include <stdlib.h>
23f5ab5f67SPekka Enberg #include <string.h>
240d1f17ecSPekka Enberg #include <unistd.h>
251f9cff23SPekka Enberg #include <stdio.h>
26b8f6afcdSPekka Enberg #include <fcntl.h>
27ce79f1caSPekka Enberg #include <time.h>
284298ddadSSasha Levin #include <sys/eventfd.h>
29c7828731SSasha Levin #include <asm/unistd.h>
3063bc8503SSasha Levin #include <dirent.h>
31b8f6afcdSPekka Enberg 
32ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
330d1f17ecSPekka Enberg 
34ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = {
35ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
36ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
37ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
38ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
39ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
40ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
41ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
42ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
43ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
44ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
45ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
46ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
47ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
48ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
49ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
50ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
51ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
52ae1fae34SPekka Enberg 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
5363e158a0SMatt Evans #ifdef CONFIG_PPC64
5463e158a0SMatt Evans 	DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL),
5563e158a0SMatt Evans #endif
569b1fb1c3SPekka Enberg };
579b1fb1c3SPekka Enberg 
584298ddadSSasha Levin static int pause_event;
594298ddadSSasha Levin static DEFINE_MUTEX(pause_lock);
60af7b0868SMatt Evans extern struct kvm_ext kvm_req_ext[];
614298ddadSSasha Levin 
629667701cSPekka Enberg static char kvm_dir[PATH_MAX];
639667701cSPekka Enberg 
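/*
 * Format the runtime directory name, create the directory if needed and
 * store its canonical path (with a trailing '/') in kvm_dir.
 */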
64495fbd4eSSasha Levin static int set_dir(const char *fmt, va_list args)
659667701cSPekka Enberg {
66dd188f9fSPekka Enberg 	char tmp[PATH_MAX];
67dd188f9fSPekka Enberg 
68dd188f9fSPekka Enberg 	vsnprintf(tmp, sizeof(tmp), fmt, args);
69dd188f9fSPekka Enberg 
702bc995fbSPekka Enberg 	mkdir(tmp, 0777);
712bc995fbSPekka Enberg 
72dd188f9fSPekka Enberg 	if (!realpath(tmp, kvm_dir))
73495fbd4eSSasha Levin 		return -errno;
74f76a3285SPekka Enberg 
75f76a3285SPekka Enberg 	strcat(kvm_dir, "/");
76495fbd4eSSasha Levin 
77495fbd4eSSasha Levin 	return 0;
789667701cSPekka Enberg }
799667701cSPekka Enberg 
809667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...)
819667701cSPekka Enberg {
829667701cSPekka Enberg 	va_list args;
839667701cSPekka Enberg 
849667701cSPekka Enberg 	va_start(args, fmt);
859667701cSPekka Enberg 	set_dir(fmt, args);
869667701cSPekka Enberg 	va_end(args);
879667701cSPekka Enberg }
889667701cSPekka Enberg 
899667701cSPekka Enberg const char *kvm__get_dir(void)
909667701cSPekka Enberg {
919667701cSPekka Enberg 	return kvm_dir;
929667701cSPekka Enberg }
939667701cSPekka Enberg 
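/*
 * Ask the kernel whether it supports @extension via KVM_CHECK_EXTENSION;
 * a negative or zero answer means the extension is unavailable.
 */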
941d6fb3f2SSasha Levin bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
95b8f6afcdSPekka Enberg {
9628fa19c0SPekka Enberg 	int ret;
97b8f6afcdSPekka Enberg 
9843835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
994076b041SPekka Enberg 	if (ret < 0)
1004076b041SPekka Enberg 		return false;
1014076b041SPekka Enberg 
1024076b041SPekka Enberg 	return ret;
1034076b041SPekka Enberg }
1044076b041SPekka Enberg 
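/*
 * Walk the kvm_req_ext[] table (terminated by an entry with a NULL name)
 * and fail if any of the required extensions is missing.
 */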
10543835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm)
10655e19624SCyrill Gorcunov {
107495fbd4eSSasha Levin 	int i;
10855e19624SCyrill Gorcunov 
109af7b0868SMatt Evans 	for (i = 0; ; i++) {
110af7b0868SMatt Evans 		if (!kvm_req_ext[i].name)
111af7b0868SMatt Evans 			break;
11243835ac9SSasha Levin 		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
113599ed2a8SCyrill Gorcunov 			pr_err("Unsupported KVM extension detected: %s",
11455e19624SCyrill Gorcunov 				kvm_req_ext[i].name);
115495fbd4eSSasha Levin 			return -i;
11655e19624SCyrill Gorcunov 		}
11755e19624SCyrill Gorcunov 	}
11855e19624SCyrill Gorcunov 
11955e19624SCyrill Gorcunov 	return 0;
12055e19624SCyrill Gorcunov }
12155e19624SCyrill Gorcunov 
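/*
 * Allocate a zeroed struct kvm; both file descriptors start out as -1
 * until kvm__init() opens them.
 */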
12247621338SSasha Levin struct kvm *kvm__new(void)
1234076b041SPekka Enberg {
124495fbd4eSSasha Levin 	struct kvm *kvm = calloc(1, sizeof(*kvm));
12543835ac9SSasha Levin 	if (!kvm)
126495fbd4eSSasha Levin 		return ERR_PTR(-ENOMEM);
1274076b041SPekka Enberg 
128d648dbf5SCyrill Gorcunov 	kvm->sys_fd = -1;
129d648dbf5SCyrill Gorcunov 	kvm->vm_fd = -1;
130d648dbf5SCyrill Gorcunov 
13143835ac9SSasha Levin 	return kvm;
1324076b041SPekka Enberg }
1334076b041SPekka Enberg 
134495fbd4eSSasha Levin int kvm__exit(struct kvm *kvm)
1359ef4c68eSPekka Enberg {
136e56e2de7SLai Jiangshan 	kvm__arch_delete_ram(kvm);
13743835ac9SSasha Levin 	free(kvm);
138495fbd4eSSasha Levin 
139495fbd4eSSasha Levin 	return 0;
1409ef4c68eSPekka Enberg }
14149a8afd1SSasha Levin core_exit(kvm__exit);
1429ef4c68eSPekka Enberg 
14396feb589SPekka Enberg /*
14496feb589SPekka Enberg  * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
14596feb589SPekka Enberg  * memory regions to it. Therefore, be careful when using this function to
14696feb589SPekka Enberg  * register memory regions for emulated hardware.
14796feb589SPekka Enberg  */
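/*
 * Illustrative call only (the size and mapping below are made up, not part
 * of this file):
 *
 *	void *ram = mmap(NULL, 64 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
 *	kvm__register_mem(kvm, 0x0, 64 << 20, ram);
 *
 * registers 64MB of anonymous host memory at guest physical address 0.
 */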
148495fbd4eSSasha Levin int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
1494076b041SPekka Enberg {
1502b0e3342SPekka Enberg 	struct kvm_userspace_memory_region mem;
151839051d9SSasha Levin 	int ret;
152839051d9SSasha Levin 
153839051d9SSasha Levin 	mem = (struct kvm_userspace_memory_region) {
15496feb589SPekka Enberg 		.slot			= kvm->mem_slots++,
155874467f8SSasha Levin 		.guest_phys_addr	= guest_phys,
156874467f8SSasha Levin 		.memory_size		= size,
157c4acb611SIngo Molnar 		.userspace_addr		= (unsigned long)userspace_addr,
158839051d9SSasha Levin 	};
159839051d9SSasha Levin 
160874467f8SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
161839051d9SSasha Levin 	if (ret < 0)
162495fbd4eSSasha Levin 		return -errno;
163495fbd4eSSasha Levin 
164495fbd4eSSasha Levin 	return 0;
165839051d9SSasha Levin }
166839051d9SSasha Levin 
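/* Number of VCPUs the kernel recommends for this host (KVM_CAP_NR_VCPUS). */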
1678259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm)
168384922b3SPekka Enberg {
169384922b3SPekka Enberg 	int ret;
170384922b3SPekka Enberg 
17143835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
1728259b8ccSSasha Levin 	if (ret <= 0)
1733b9b691dSMatt Evans 		/*
1743b9b691dSMatt Evans 		 * api.txt states that if KVM_CAP_NR_VCPUS does not exist,
1753b9b691dSMatt Evans 		 * assume 4.
1763b9b691dSMatt Evans 		 */
1773b9b691dSMatt Evans 		return 4;
178384922b3SPekka Enberg 
179384922b3SPekka Enberg 	return ret;
180384922b3SPekka Enberg }
181384922b3SPekka Enberg 
1828259b8ccSSasha Levin /*
1838259b8ccSSasha Levin  * The following hack should be removed once 'x86: Raise the hard
1848259b8ccSSasha Levin  * VCPU count limit' makes its way into the mainline.
1858259b8ccSSasha Levin  */
1868259b8ccSSasha Levin #ifndef KVM_CAP_MAX_VCPUS
1878259b8ccSSasha Levin #define KVM_CAP_MAX_VCPUS 66
1888259b8ccSSasha Levin #endif
1898259b8ccSSasha Levin 
1908259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm)
1918259b8ccSSasha Levin {
1928259b8ccSSasha Levin 	int ret;
1938259b8ccSSasha Levin 
1948259b8ccSSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
1958259b8ccSSasha Levin 	if (ret <= 0)
1968259b8ccSSasha Levin 		ret = kvm__recommended_cpus(kvm);
1978259b8ccSSasha Levin 
1988259b8ccSSasha Levin 	return ret;
1998259b8ccSSasha Levin }
2008259b8ccSSasha Levin 
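/*
 * Bring the VM up: open the KVM device named in the config, check the API
 * version, create the VM, verify the required extensions, set up guest RAM
 * and load either a kernel image or a firmware image.
 */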
20147621338SSasha Levin int kvm__init(struct kvm *kvm)
202839051d9SSasha Levin {
2034076b041SPekka Enberg 	int ret;
2044076b041SPekka Enberg 
205495fbd4eSSasha Levin 	if (!kvm__arch_cpu_supports_vm()) {
206495fbd4eSSasha Levin 		pr_err("Your CPU does not support hardware virtualization");
2076fce7105SYang Bai 		ret = -ENOSYS;
2086fce7105SYang Bai 		goto err;
209495fbd4eSSasha Levin 	}
210c78b8713SAsias He 
21147621338SSasha Levin 	kvm->sys_fd = open(kvm->cfg.dev, O_RDWR);
21243835ac9SSasha Levin 	if (kvm->sys_fd < 0) {
213d648dbf5SCyrill Gorcunov 		if (errno == ENOENT)
214495fbd4eSSasha Levin 			pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM "
21547621338SSasha Levin 			       "enabled and that the KVM modules are loaded.", kvm->cfg.dev);
216d648dbf5SCyrill Gorcunov 		else if (errno == ENODEV)
217d648dbf5SCyrill Gorcunov 			pr_err("'%s' KVM driver not available.\n  # (If the KVM "
218495fbd4eSSasha Levin 			       "module is loaded then 'dmesg' may offer further clues "
21947621338SSasha Levin 			       "about the failure.)", kvm->cfg.dev);
220d648dbf5SCyrill Gorcunov 		else
22147621338SSasha Levin 			pr_err("Could not open %s: ", kvm->cfg.dev);
222d648dbf5SCyrill Gorcunov 
223495fbd4eSSasha Levin 		ret = -errno;
224d648dbf5SCyrill Gorcunov 		goto err_free;
2256d7c36ceSPekka Enberg 	}
226b8f6afcdSPekka Enberg 
22743835ac9SSasha Levin 	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
228495fbd4eSSasha Levin 	if (ret != KVM_API_VERSION) {
229495fbd4eSSasha Levin 		pr_err("KVM_GET_API_VERSION ioctl");
230495fbd4eSSasha Levin 		ret = -errno;
231d648dbf5SCyrill Gorcunov 		goto err_sys_fd;
232495fbd4eSSasha Levin 	}
2336c7d8514SPekka Enberg 
23443835ac9SSasha Levin 	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
235495fbd4eSSasha Levin 	if (kvm->vm_fd < 0) {
236495fbd4eSSasha Levin 		ret = kvm->vm_fd;
237d648dbf5SCyrill Gorcunov 		goto err_sys_fd;
238495fbd4eSSasha Levin 	}
23928fa19c0SPekka Enberg 
240495fbd4eSSasha Levin 	if (kvm__check_extensions(kvm)) {
241495fbd4eSSasha Levin 		pr_err("A required KVM extension is not supported by the OS");
242495fbd4eSSasha Levin 		ret = -ENOSYS;
2436fce7105SYang Bai 		goto err_vm_fd;
244495fbd4eSSasha Levin 	}
2459687927dSAsias He 
24647621338SSasha Levin 	kvm__arch_init(kvm, kvm->cfg.hugetlbfs_path, kvm->cfg.ram_size);
2479687927dSAsias He 
248abee258bSSasha Levin 	kvm__init_ram(kvm);
249abee258bSSasha Levin 
250084a1356SSasha Levin 	if (!kvm->cfg.firmware_filename) {
251084a1356SSasha Levin 		if (!kvm__load_kernel(kvm, kvm->cfg.kernel_filename,
252084a1356SSasha Levin 				kvm->cfg.initrd_filename, kvm->cfg.real_cmdline, kvm->cfg.vidmode))
253084a1356SSasha Levin 			die("unable to load kernel %s", kvm->cfg.kernel_filename);
254084a1356SSasha Levin 	}
255084a1356SSasha Levin 
256084a1356SSasha Levin 	if (kvm->cfg.firmware_filename) {
257084a1356SSasha Levin 		if (!kvm__load_firmware(kvm, kvm->cfg.firmware_filename))
258084a1356SSasha Levin 			die("unable to load firmware image %s: %s", kvm->cfg.firmware_filename, strerror(errno));
259084a1356SSasha Levin 	} else {
260084a1356SSasha Levin 		ret = kvm__arch_setup_firmware(kvm);
261084a1356SSasha Levin 		if (ret < 0)
262084a1356SSasha Levin 			die("kvm__arch_setup_firmware() failed with error %d", ret);
263084a1356SSasha Levin 	}
264084a1356SSasha Levin 
26547621338SSasha Levin 	return 0;
266d648dbf5SCyrill Gorcunov 
2676fce7105SYang Bai err_vm_fd:
268495fbd4eSSasha Levin 	close(kvm->vm_fd);
269d648dbf5SCyrill Gorcunov err_sys_fd:
270495fbd4eSSasha Levin 	close(kvm->sys_fd);
271d648dbf5SCyrill Gorcunov err_free:
272495fbd4eSSasha Levin 	free(kvm);
2736fce7105SYang Bai err:
27447621338SSasha Levin 	return ret;
2754076b041SPekka Enberg }
27649a8afd1SSasha Levin core_init(kvm__init);
2774076b041SPekka Enberg 
27872811558SPekka Enberg /* RFC 1952 */
27972811558SPekka Enberg #define GZIP_ID1		0x1f
28072811558SPekka Enberg #define GZIP_ID2		0x8b
281663ce1dfSMatt Evans #define CPIO_MAGIC		"0707"
282663ce1dfSMatt Evans /* initrd may be gzipped, or a plain cpio */
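/*
 * Peek at the first four bytes and rewind: gzip data starts with the bytes
 * 0x1f 0x8b (RFC 1952), while the ASCII cpio formats begin with "0707".
 */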
28372811558SPekka Enberg static bool initrd_check(int fd)
28472811558SPekka Enberg {
285663ce1dfSMatt Evans 	unsigned char id[4];
28672811558SPekka Enberg 
28772811558SPekka Enberg 	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
28872811558SPekka Enberg 		return false;
28972811558SPekka Enberg 
29072811558SPekka Enberg 	if (lseek(fd, 0, SEEK_SET) < 0)
29172811558SPekka Enberg 		die_perror("lseek");
29272811558SPekka Enberg 
293663ce1dfSMatt Evans 	return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) ||
294663ce1dfSMatt Evans 		!memcmp(id, CPIO_MAGIC, 4);
29572811558SPekka Enberg }
29672811558SPekka Enberg 
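/*
 * Load the guest kernel: try it as a bzImage first and fall back to a flat
 * binary. An optional initrd is sanity-checked before either loader runs.
 */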
2976d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
29853861c74SJohn Floren 		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
299ae1fae34SPekka Enberg {
3007fb218bdSPekka Enberg 	bool ret;
3012065a6f7SCyrill Gorcunov 	int fd_kernel = -1, fd_initrd = -1;
302ae1fae34SPekka Enberg 
3032065a6f7SCyrill Gorcunov 	fd_kernel = open(kernel_filename, O_RDONLY);
3042065a6f7SCyrill Gorcunov 	if (fd_kernel < 0)
3050b62d2bbSPekka Enberg 		die("Unable to open kernel %s", kernel_filename);
306ae1fae34SPekka Enberg 
3072065a6f7SCyrill Gorcunov 	if (initrd_filename) {
3082065a6f7SCyrill Gorcunov 		fd_initrd = open(initrd_filename, O_RDONLY);
3092065a6f7SCyrill Gorcunov 		if (fd_initrd < 0)
3100b62d2bbSPekka Enberg 			die("Unable to open initrd %s", initrd_filename);
31172811558SPekka Enberg 
31272811558SPekka Enberg 		if (!initrd_check(fd_initrd))
31372811558SPekka Enberg 			die("%s is not an initrd", initrd_filename);
3142065a6f7SCyrill Gorcunov 	}
3152065a6f7SCyrill Gorcunov 
31653861c74SJohn Floren 	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);
31728972750SCyrill Gorcunov 
318009b0758SPekka Enberg 	if (ret)
319009b0758SPekka Enberg 		goto found_kernel;
320ae1fae34SPekka Enberg 
3214542f276SCyrill Gorcunov 	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);
3220b62d2bbSPekka Enberg 
323604dbd63SMatt Evans 	ret = load_flat_binary(kvm, fd_kernel, fd_initrd, kernel_cmdline);
324604dbd63SMatt Evans 
325009b0758SPekka Enberg 	if (ret)
326009b0758SPekka Enberg 		goto found_kernel;
327009b0758SPekka Enberg 
328604dbd63SMatt Evans 	if (initrd_filename)
329604dbd63SMatt Evans 		close(fd_initrd);
3305a6ac675SSasha Levin 	close(fd_kernel);
3315a6ac675SSasha Levin 
332009b0758SPekka Enberg 	die("%s is not a valid bzImage or flat binary", kernel_filename);
333009b0758SPekka Enberg 
334009b0758SPekka Enberg found_kernel:
335604dbd63SMatt Evans 	if (initrd_filename)
336604dbd63SMatt Evans 		close(fd_initrd);
3375a6ac675SSasha Levin 	close(fd_kernel);
3385a6ac675SSasha Levin 
339ae1fae34SPekka Enberg 	return ret;
340ae1fae34SPekka Enberg }
341ae1fae34SPekka Enberg 
342ce79f1caSPekka Enberg #define TIMER_INTERVAL_NS 1000000	/* 1 msec */
343ce79f1caSPekka Enberg 
344ce79f1caSPekka Enberg /*
345ce79f1caSPekka Enberg  * This function sets up a timer that's used to inject interrupts from the
346ce79f1caSPekka Enberg  * userspace hypervisor into the guest at periodic intervals. Note that the
347ce79f1caSPekka Enberg  * clock interrupt, for example, is not handled here.
348ce79f1caSPekka Enberg  */
349b4532ca9SSasha Levin int kvm_timer__init(struct kvm *kvm)
350ce79f1caSPekka Enberg {
351ce79f1caSPekka Enberg 	struct itimerspec its;
352ce79f1caSPekka Enberg 	struct sigevent sev;
353b4532ca9SSasha Levin 	int r;
354ce79f1caSPekka Enberg 
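	/*
	 * Deliver SIGALRM to this thread only: SIGEV_THREAD_ID takes a
	 * thread id (obtained here with the raw gettid syscall) in the
	 * internal _sigev_un._tid field of struct sigevent.
	 */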
355ce79f1caSPekka Enberg 	memset(&sev, 0, sizeof(struct sigevent));
356ce79f1caSPekka Enberg 	sev.sigev_value.sival_int	= 0;
357c7828731SSasha Levin 	sev.sigev_notify		= SIGEV_THREAD_ID;
358ce79f1caSPekka Enberg 	sev.sigev_signo			= SIGALRM;
3595002444cSSasha Levin 	sev.sigev_value.sival_ptr	= kvm;
360c7828731SSasha Levin 	sev._sigev_un._tid		= syscall(__NR_gettid);
361ce79f1caSPekka Enberg 
362b4532ca9SSasha Levin 	r = timer_create(CLOCK_REALTIME, &sev, &kvm->timerid);
363b4532ca9SSasha Levin 	if (r < 0)
364b4532ca9SSasha Levin 		return r;
365ce79f1caSPekka Enberg 
366ce79f1caSPekka Enberg 	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
367ce79f1caSPekka Enberg 	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
368ce79f1caSPekka Enberg 	its.it_interval.tv_sec		= its.it_value.tv_sec;
369ce79f1caSPekka Enberg 	its.it_interval.tv_nsec		= its.it_value.tv_nsec;
370ce79f1caSPekka Enberg 
371b4532ca9SSasha Levin 	r = timer_settime(kvm->timerid, 0, &its, NULL);
372b4532ca9SSasha Levin 	if (r < 0) {
373b4532ca9SSasha Levin 		timer_delete(kvm->timerid);
374b4532ca9SSasha Levin 		return r;
375ce79f1caSPekka Enberg 	}
376ce79f1caSPekka Enberg 
377b4532ca9SSasha Levin 	return 0;
378b4532ca9SSasha Levin }
37949a8afd1SSasha Levin firmware_init(kvm_timer__init);
380b4532ca9SSasha Levin 
381b4532ca9SSasha Levin int kvm_timer__exit(struct kvm *kvm)
382fbfe68b7SSasha Levin {
38343835ac9SSasha Levin 	if (kvm->timerid)
38443835ac9SSasha Levin 		if (timer_delete(kvm->timerid) < 0)
385fbfe68b7SSasha Levin 			die("timer_delete()");
386fbfe68b7SSasha Levin 
38743835ac9SSasha Levin 	kvm->timerid = 0;
388b4532ca9SSasha Levin 
389b4532ca9SSasha Levin 	return 0;
390fbfe68b7SSasha Levin }
39149a8afd1SSasha Levin firmware_exit(kvm_timer__exit);
392fbfe68b7SSasha Levin 
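/*
 * Hex-dump guest memory starting at @addr, eight bytes per line, stopping
 * early if the range runs past the end of guest RAM.
 */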
39343835ac9SSasha Levin void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
394090f898eSCyrill Gorcunov {
395090f898eSCyrill Gorcunov 	unsigned char *p;
396090f898eSCyrill Gorcunov 	unsigned long n;
397090f898eSCyrill Gorcunov 
398090f898eSCyrill Gorcunov 	size &= ~7; /* round down to a multiple of 8 */
399090f898eSCyrill Gorcunov 	if (!size)
400090f898eSCyrill Gorcunov 		return;
401090f898eSCyrill Gorcunov 
40243835ac9SSasha Levin 	p = guest_flat_to_host(kvm, addr);
403090f898eSCyrill Gorcunov 
40448cf3877SPekka Enberg 	for (n = 0; n < size; n += 8) {
40543835ac9SSasha Levin 		if (!host_ptr_in_ram(kvm, p + n))
40648cf3877SPekka Enberg 			break;
40748cf3877SPekka Enberg 
408090f898eSCyrill Gorcunov 		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
409090f898eSCyrill Gorcunov 			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
410090f898eSCyrill Gorcunov 				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
411090f898eSCyrill Gorcunov 	}
41248cf3877SPekka Enberg }
4134298ddadSSasha Levin 
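/*
 * Stop all VCPU threads: signal each one with SIGKVMPAUSE and wait on an
 * eventfd until every thread has acknowledged through kvm__notify_paused().
 * pause_lock stays held so the VCPUs block until kvm__continue() is called.
 */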
414*4346fd8fSSasha Levin void kvm__pause(struct kvm *kvm)
4154298ddadSSasha Levin {
4164298ddadSSasha Levin 	int i, paused_vcpus = 0;
4174298ddadSSasha Levin 
4184298ddadSSasha Levin 	/* Check if the guest is running */
419df4239fbSSasha Levin 	if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0)
4204298ddadSSasha Levin 		return;
4214298ddadSSasha Levin 
4224298ddadSSasha Levin 	mutex_lock(&pause_lock);
4234298ddadSSasha Levin 
4244298ddadSSasha Levin 	pause_event = eventfd(0, 0);
4254298ddadSSasha Levin 	if (pause_event < 0)
4264298ddadSSasha Levin 		die("Failed creating pause notification event");
4274298ddadSSasha Levin 	for (i = 0; i < kvm->nrcpus; i++)
428df4239fbSSasha Levin 		pthread_kill(kvm->cpus[i]->thread, SIGKVMPAUSE);
4294298ddadSSasha Levin 
4304298ddadSSasha Levin 	while (paused_vcpus < kvm->nrcpus) {
4314298ddadSSasha Levin 		u64 cur_read;
4324298ddadSSasha Levin 
4334298ddadSSasha Levin 		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
4344298ddadSSasha Levin 			die("Failed reading pause event");
4354298ddadSSasha Levin 		paused_vcpus += cur_read;
4364298ddadSSasha Levin 	}
4374298ddadSSasha Levin 	close(pause_event);
4384298ddadSSasha Levin }
4394298ddadSSasha Levin 
440*4346fd8fSSasha Levin void kvm__continue(struct kvm *kvm)
4414298ddadSSasha Levin {
4424298ddadSSasha Levin 	/* Check if the guest is running */
443df4239fbSSasha Levin 	if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0)
4444298ddadSSasha Levin 		return;
4454298ddadSSasha Levin 
4464298ddadSSasha Levin 	mutex_unlock(&pause_lock);
4474298ddadSSasha Levin }
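/*
 * Typical caller pattern (illustrative only, not taken from this file):
 *
 *	kvm__pause(kvm);
 *	... update guest-visible state that must not race with VCPUs ...
 *	kvm__continue(kvm);
 */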
4484298ddadSSasha Levin 
4494298ddadSSasha Levin void kvm__notify_paused(void)
4504298ddadSSasha Levin {
4514298ddadSSasha Levin 	u64 p = 1;
4524298ddadSSasha Levin 
4534298ddadSSasha Levin 	if (write(pause_event, &p, sizeof(p)) < 0)
4544298ddadSSasha Levin 		die("Failed notifying of paused VCPU.");
4554298ddadSSasha Levin 
4564298ddadSSasha Levin 	mutex_lock(&pause_lock);
4574298ddadSSasha Levin 	mutex_unlock(&pause_lock);
4584298ddadSSasha Levin }
459