#include "kvm/kvm.h"

#include "kvm/cpufeature.h"
#include "kvm/interrupt.h"
#include "kvm/boot-protocol.h"
#include "kvm/util.h"
#include "kvm/mptable.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};

#define DEFINE_KVM_EXT(ext)	\
	.name = #ext,		\
	.code = ext

struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};

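/*
 * Returns true if the host KVM module reports @extension as supported
 * via the KVM_CHECK_EXTENSION ioctl, false otherwise.
 */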
static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}

static int kvm__check_extensions(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			error("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			return (int)-i;
		}
	}

	return 0;
}

static struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof *kvm);

	if (!kvm)
		die("out of memory");

	return kvm;
}

void kvm__delete(struct kvm *kvm)
{
	kvm__stop_timer(kvm);

	munmap(kvm->ram_start, kvm->ram_size);
	free(kvm);
}

static bool kvm__cpu_supports_vm(void)
{
	struct cpuid_regs regs;
	u32 eax_base;
	int feature;

	regs = (struct cpuid_regs) {
		.eax = 0x00,
	};
	host_cpuid(&regs);

	switch (regs.ebx) {
	case CPUID_VENDOR_INTEL_1:
		eax_base = 0x00;
		feature  = KVM__X86_FEATURE_VMX;
		break;

	case CPUID_VENDOR_AMD_1:
		eax_base = 0x80000000;
		feature  = KVM__X86_FEATURE_SVM;
		break;

	default:
		return false;
	}

	regs = (struct cpuid_regs) {
		.eax = eax_base,
	};
	host_cpuid(&regs);

	if (regs.eax < eax_base + 0x01)
		return false;

	regs = (struct cpuid_regs) {
		.eax = eax_base + 0x01
	};
	host_cpuid(&regs);

	return regs.ecx & (1 << feature);
}

static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
{
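	/*
	 * Map @size bytes of host memory at @userspace_addr into the guest
	 * physical address space at @guest_phys, using memory slot @slot.
	 */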
	struct kvm_userspace_memory_region mem;
	int ret;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= slot,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}

/*
 * Allocating more than 4GB of RAM requires us to leave a gap
 * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
 * devices (see documentation of e820_setup_gap() for details).
 *
 * If we're required to initialize RAM bigger than 4GB, we will create
 * a gap between 0xe0000000 and 0x100000000 in the guest physical
 * address space.
 */

void kvm__init_ram(struct kvm *kvm)
{
	u64 phys_start, phys_size;
	void *host_mem;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		/* Use a single block of RAM for 32bit RAM */

		phys_start = 0;
		phys_size  = kvm->ram_size;
		host_mem   = kvm->ram_start;

		kvm_register_mem_slot(kvm, 0, 0, kvm->ram_size, kvm->ram_start);
	} else {
		/* First RAM range from zero to the PCI gap: */

		phys_start = 0;
		phys_size  = KVM_32BIT_GAP_START;
		host_mem   = kvm->ram_start;

		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);

		/* Second RAM range from 4GB to the end of RAM: */

		phys_start = 0x100000000ULL;
		phys_size  = kvm->ram_size - phys_size;
		host_mem   = kvm->ram_start + phys_start;

		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
	}
}

int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret < 0)
		die_perror("KVM_CAP_NR_VCPUS");

	return ret;
}

struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
{
	struct kvm_pit_config pit_config = { .flags = 0, };
	struct kvm *kvm;
	int ret;

	if (!kvm__cpu_supports_vm())
		die("Your CPU does not support hardware virtualization");

	kvm = kvm__new();

	kvm->sys_fd = open(kvm_dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
		if (errno == ENODEV)
			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);

		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
		perror(NULL);
		exit(1);
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(kvm))
		die("A required KVM extension is not supported by the OS");

	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
	if (ret < 0)
		die_perror("KVM_SET_TSS_ADDR ioctl");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
	if (ret < 0)
		die_perror("KVM_CREATE_PIT2 ioctl");

	kvm->ram_size = ram_size;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
	} else {
		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
		if (kvm->ram_start != MAP_FAILED) {
			/*
			 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
			 * if we accidentally write to it, we will know.
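			 * The gap covers KVM_32BIT_GAP_SIZE bytes starting at
			 * guest physical address KVM_32BIT_GAP_START.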
			 */
			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
		}
	}
	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");

	return kvm;
}

#define BOOT_LOADER_SELECTOR	0x1000
#define BOOT_LOADER_IP		0x0000
#define BOOT_LOADER_SP		0x8000
#define BOOT_CMDLINE_OFFSET	0x20000

#define BOOT_PROTOCOL_REQUIRED	0x206
#define LOAD_HIGH		0x01

static int load_flat_binary(struct kvm *kvm, int fd)
{
	void *p;
	int nr;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	while ((nr = read(fd, p, 65536)) > 0)
		p += nr;

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	kvm->boot_ip		= BOOT_LOADER_IP;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}

static const char *BZIMAGE_MAGIC = "HdrS";

static bool load_bzimage(struct kvm *kvm, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	/*
	 * See Documentation/x86/boot.txt for details on bzImage on-disk and
	 * memory layout.
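	 * In short: the boot header lives at offset 0x01f1 of the image, and
	 * (setup_sects + 1) 512-byte sectors of real-mode setup code precede
	 * the protected-mode kernel proper, which we copy to BZ_KERNEL_START.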
	 */

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
		return false;

	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
		return false;

	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
		die("Kernel is too old");

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	setup_sects = boot.hdr.setup_sects + 1;

	setup_size = setup_sects << 9;
	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to mem */
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START */
	p = guest_flat_to_host(kvm, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;

	/*
	 * Read initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (kvm->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

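		/*
		 * Copy the initrd to the address we just picked and tell the
		 * kernel where it is through the ramdisk fields of the boot
		 * header.
		 */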
		p = guest_flat_to_host(kvm, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	kvm->boot_ip		= BOOT_LOADER_IP + 0x200;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}

bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);

	if (initrd_filename)
		close(fd_initrd);

	if (ret)
		goto found_kernel;

	warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd_kernel);

	return ret;
}

/**
 * kvm__setup_bios - inject BIOS into guest system memory
 * @kvm: guest system descriptor
 *
 * This is the main routine where we poke guest memory
 * and install the BIOS there.
 */
void kvm__setup_bios(struct kvm *kvm)
{
	/* standard minimal configuration */
	setup_bios(kvm);

	/* FIXME: SMP, ACPI and friends here */

	/* MP table */
	mptable_setup(kvm, kvm->nrcpus);
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that's used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Please note
 * that the clock interrupt, for example, is not handled here.
 */
void kvm__start_timer(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_SIGNAL;
	sev.sigev_signo			= SIGALRM;

	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
		die("timer_create()");

	its.it_value.tv_sec	= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec	= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec	= its.it_value.tv_sec;
	its.it_interval.tv_nsec	= its.it_value.tv_nsec;

	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
		die("timer_settime()");
}

void kvm__stop_timer(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;
}

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;

	irq_level = (struct kvm_irq_level) {
		{
			.irq = irq,
		},
		.level = level,
	};

	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
		die_perror("KVM_IRQ_LINE failed");
}

void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

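	/* Print eight bytes per line, stopping at the end of guest RAM. */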
	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
			p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}