/* kvmtool: kvm.c (revision 4298ddade7b629079b3dfb0457cf4bcff7c47d3e) */
#include "kvm/kvm.h"

#include "kvm/boot-protocol.h"
#include "kvm/cpufeature.h"
#include "kvm/read-write.h"
#include "kvm/interrupt.h"
#include "kvm/mptable.h"
#include "kvm/util.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <sys/eventfd.h>

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
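
/*
 * Example (illustrative sketch, not part of the original code): a VCPU run
 * loop elsewhere in kvmtool can turn the numeric exit reason from
 * struct kvm_run into a symbolic name for diagnostics:
 *
 *	struct kvm_run *run = ...;
 *
 *	if (run->exit_reason < ARRAY_SIZE(kvm_exit_reasons))
 *		printf("KVM exit: %s\n", kvm_exit_reasons[run->exit_reason]);
 */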

#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};

extern struct kvm *kvm;
extern struct kvm_cpu *kvm_cpus[KVM_NR_CPUS];
static int pause_event;
static DEFINE_MUTEX(pause_lock);

static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}
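
/*
 * Note (added for clarity): KVM_CHECK_EXTENSION returns 0 when a capability
 * is unsupported and a positive value (usually 1, or a capability-specific
 * count) when it is supported, which is why the raw return value is usable
 * as a boolean in kvm__supports_extension() above.
 */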

static int kvm__check_extensions(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			pr_error("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			/* returning -i here would read as success for i == 0 */
			return -1;
		}
	}

	return 0;
}
static struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof *kvm);

	if (!kvm)
		die("out of memory");

	return kvm;
}

void kvm__delete(struct kvm *kvm)
{
	kvm__stop_timer(kvm);

	munmap(kvm->ram_start, kvm->ram_size);
	free(kvm);
}

static bool kvm__cpu_supports_vm(void)
{
	struct cpuid_regs regs;
	u32 eax_base;
	int feature;

	regs	= (struct cpuid_regs) {
		.eax		= 0x00,
	};
	host_cpuid(&regs);

	switch (regs.ebx) {
	case CPUID_VENDOR_INTEL_1:
		eax_base	= 0x00;
		feature		= KVM__X86_FEATURE_VMX;
		break;

	case CPUID_VENDOR_AMD_1:
		eax_base	= 0x80000000;
		feature		= KVM__X86_FEATURE_SVM;
		break;

	default:
		return false;
	}

	regs	= (struct cpuid_regs) {
		.eax		= eax_base,
	};
	host_cpuid(&regs);

	if (regs.eax < eax_base + 0x01)
		return false;

	regs	= (struct cpuid_regs) {
		.eax		= eax_base + 0x01
	};
	host_cpuid(&regs);

	return regs.ecx & (1 << feature);
}
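
/*
 * Note (assumption, based on the standard CPUID layout): on Intel the VMX
 * bit lives in ECX of leaf 0x1, on AMD the SVM bit lives in ECX of leaf
 * 0x80000001; KVM__X86_FEATURE_VMX/SVM in cpufeature.h are expected to
 * encode the corresponding bit positions (5 and 2).
 */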

static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem;
	int ret;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= slot,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}
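
/*
 * Note (added for clarity): each KVM memory slot maps a contiguous range of
 * guest physical addresses onto host userspace memory; slot numbers must be
 * unique per VM, which is why kvm__init_ram() below uses slots 0 and 1 for
 * the split layout.
 */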

/*
 * Allocating more than 4GB of RAM requires us to leave a gap
 * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
 * devices (see the documentation of e820_setup_gap() for details).
 *
 * If we're required to initialize RAM bigger than 4GB, we will create
 * a gap between 0xe0000000 and 0x100000000 in the guest physical
 * address space.
 */

void kvm__init_ram(struct kvm *kvm)
{
	u64	phys_start, phys_size;
	void	*host_mem;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		/* Use a single block of RAM for 32bit RAM */

		phys_start = 0;
		phys_size  = kvm->ram_size;
		host_mem   = kvm->ram_start;

		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
	} else {
		/* First RAM range from zero to the PCI gap: */

		phys_start = 0;
		phys_size  = KVM_32BIT_GAP_START;
		host_mem   = kvm->ram_start;

		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);

		/* Second RAM range from 4GB to the end of RAM: */

		phys_start = 0x100000000ULL;
		phys_size  = kvm->ram_size - phys_size;
		host_mem   = kvm->ram_start + phys_start;

		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
	}
}
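
/*
 * Resulting layout (sketch, for RAM sizes above KVM_32BIT_GAP_START):
 *
 *	guest physical			host userspace (kvm->ram_start)
 *	0x00000000  - 0xe0000000	slot 0, offset 0
 *	0xe0000000  - 0x100000000	PCI/MMIO gap, mprotect()ed PROT_NONE in kvm__init()
 *	0x100000000 - end of RAM	slot 1, offset 0x100000000
 */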

int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret < 0)
		die_perror("KVM_CAP_NR_VCPUS");

	return ret;
}

struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
{
	struct kvm_pit_config pit_config = { .flags = 0, };
	struct kvm *kvm;
	int ret;

	if (!kvm__cpu_supports_vm())
		die("Your CPU does not support hardware virtualization");

	kvm = kvm__new();

	kvm->sys_fd = open(kvm_dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
		if (errno == ENODEV)
			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);

		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
		perror(NULL);
		exit(1);
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(kvm))
		die("A required KVM extension is not supported by the OS");

	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
	if (ret < 0)
		die_perror("KVM_SET_TSS_ADDR ioctl");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
	if (ret < 0)
		die_perror("KVM_CREATE_PIT2 ioctl");

	kvm->ram_size		= ram_size;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
	} else {
		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
		if (kvm->ram_start != MAP_FAILED) {
			/*
			 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
			 * if we accidentally write to it, we will know.
			 */
			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
		}
	}
	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");

	return kvm;
}
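
/*
 * Rough usage sketch (assumption; the actual call sites live elsewhere in
 * kvmtool):
 *
 *	kvm = kvm__init("/dev/kvm", ram_size);
 *	kvm__load_kernel(kvm, kernel_filename, initrd_filename, cmdline, vidmode);
 *	kvm__setup_bios(kvm);
 *	kvm__init_ram(kvm);
 *	kvm__start_timer(kvm);
 *	...run the VCPUs, then kvm__stop_timer(kvm) and kvm__delete(kvm)...
 */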

#define BOOT_LOADER_SELECTOR	0x1000
#define BOOT_LOADER_IP		0x0000
#define BOOT_LOADER_SP		0x8000
#define BOOT_CMDLINE_OFFSET	0x20000

#define BOOT_PROTOCOL_REQUIRED	0x206
#define LOAD_HIGH		0x01

static int load_flat_binary(struct kvm *kvm, int fd)
{
	void *p;
	int nr;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	while ((nr = read(fd, p, 65536)) > 0)
		p += nr;

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	kvm->boot_ip		= BOOT_LOADER_IP;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}
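
/*
 * Note (added for clarity): a flat binary is copied verbatim to real-mode
 * address BOOT_LOADER_SELECTOR:BOOT_LOADER_IP, i.e. linear address
 * (0x1000 << 4) + 0x0000 = 0x10000, and the guest starts executing there.
 */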

static const char *BZIMAGE_MAGIC	= "HdrS";

static bool load_bzimage(struct kvm *kvm, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline, u16 vidmode)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	/*
	 * See Documentation/x86/boot.txt for details on the bzImage on-disk
	 * and in-memory layout.
	 */

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
		return false;

	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
		return false;

	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
		die("Kernel too old: boot protocol version 2.06 or later is required");

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	setup_sects = boot.hdr.setup_sects + 1;

	setup_size = setup_sects << 9;
	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to memory */
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START */
	p = guest_flat_to_host(kvm, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	kern_boot	= guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;
	kern_boot->hdr.vid_mode		= vidmode;

	/*
	 * Read initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (kvm->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

		p = guest_flat_to_host(kvm, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	kvm->boot_ip		= BOOT_LOADER_IP + 0x200;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}
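
/*
 * Resulting guest memory layout for a bzImage boot (sketch, derived from the
 * code above):
 *
 *	BOOT_LOADER_SELECTOR:0x0000	real-mode setup code (setup_sects + 1 sectors)
 *	BOOT_CMDLINE_OFFSET		kernel command line
 *	BZ_KERNEL_START			protected-mode kernel (vmlinux.bin)
 *	below initrd_addr_max		initrd, if one was given
 *
 * The VCPU is then started at BOOT_LOADER_SELECTOR:(BOOT_LOADER_IP + 0x200).
 */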

/* RFC 1952 */
#define GZIP_ID1		0x1f
#define GZIP_ID2		0x8b

static bool initrd_check(int fd)
{
	unsigned char id[2];

	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
		return false;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	return id[0] == GZIP_ID1 && id[1] == GZIP_ID2;
}

bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);

		if (!initrd_check(fd_initrd))
			die("%s is not an initrd", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);

	if (initrd_filename)
		close(fd_initrd);

	if (ret)
		goto found_kernel;

	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd_kernel);

	return ret;
}

/**
 * kvm__setup_bios - inject BIOS into guest system memory
 * @kvm - guest system descriptor
 *
 * This is the main routine where we poke guest memory
 * and install the BIOS there.
 */
void kvm__setup_bios(struct kvm *kvm)
{
	/* standard minimal configuration */
	setup_bios(kvm);

	/* FIXME: SMP, ACPI and friends here */

	/* MP table */
	mptable_setup(kvm, kvm->nrcpus);
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that's used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Note that
 * the clock interrupt, for example, is not handled here.
 */
void kvm__start_timer(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_SIGNAL;
	sev.sigev_signo			= SIGALRM;

	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
		die("timer_create()");

	its.it_value.tv_sec		= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec		= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec		= its.it_value.tv_sec;
	its.it_interval.tv_nsec		= its.it_value.tv_nsec;

	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
		die("timer_settime()");
}

void kvm__stop_timer(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;
}

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;

	irq_level	= (struct kvm_irq_level) {
		{
			.irq		= irq,
		},
		.level		= level,
	};

	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
		die_perror("KVM_IRQ_LINE failed");
}
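
/*
 * Example (illustrative sketch, not part of the original code): a device
 * model typically pulses an edge-triggered interrupt line like this:
 *
 *	kvm__irq_line(kvm, irq, 1);
 *	kvm__irq_line(kvm, irq, 0);
 */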

void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* round down to a multiple of 8 bytes */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}

void kvm__pause(void)
{
	int i, paused_vcpus = 0;

	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_lock(&pause_lock);

	pause_event = eventfd(0, 0);
	if (pause_event < 0)
		die("Failed creating pause notification event");
	for (i = 0; i < kvm->nrcpus; i++)
		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);

	while (paused_vcpus < kvm->nrcpus) {
		u64 cur_read;

		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
			die("Failed reading pause event");
		paused_vcpus += cur_read;
	}
	close(pause_event);
}

void kvm__continue(void)
{
	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_unlock(&pause_lock);
}

void kvm__notify_paused(void)
{
	u64 p = 1;

	if (write(pause_event, &p, sizeof(p)) < 0)
		die("Failed notifying of paused VCPU.");

	mutex_lock(&pause_lock);
	mutex_unlock(&pause_lock);
}
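
/*
 * How the pause protocol above fits together (summary of the code, plus a
 * hedged usage sketch): kvm__pause() takes pause_lock and sends SIGKVMPAUSE
 * to every VCPU thread; each VCPU's signal path is expected to call
 * kvm__notify_paused(), which bumps the eventfd and then blocks trying to
 * take pause_lock. Once kvm__pause() has counted all VCPUs it returns with
 * the lock still held, so the guest stays frozen until kvm__continue()
 * drops the lock. A caller that needs the guest quiesced would do roughly:
 *
 *	kvm__pause();
 *	...touch guest state safely...
 *	kvm__continue();
 */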