#include "kvm/kvm.h"

#include "kvm/boot-protocol.h"
#include "kvm/cpufeature.h"
#include "kvm/read-write.h"
#include "kvm/interrupt.h"
#include "kvm/mptable.h"
#include "kvm/util.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <dirent.h>

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

#define KVM_PID_FILE_PATH "/.kvm-tools/"
#define HOME_DIR getenv("HOME")

const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};

#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};

extern struct kvm *kvm;
extern struct kvm_cpu *kvm_cpus[KVM_NR_CPUS];
static int pause_event;
static DEFINE_MUTEX(pause_lock);

static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}

static int kvm__check_extensions(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			pr_error("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			return -1;
		}
	}

	return 0;
}

static struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof *kvm);

	if (!kvm)
		die("out of memory");

	return kvm;
}

static void kvm__create_pidfile(struct kvm *kvm)
{
	int fd;
	char full_name[PATH_MAX], pid[10];

	if (!kvm->name)
		return;

	sprintf(full_name, "%s/%s", HOME_DIR, KVM_PID_FILE_PATH);
	mkdir(full_name, 0777);
	sprintf(full_name, "%s/%s/%s.pid", HOME_DIR, KVM_PID_FILE_PATH, kvm->name);
	fd = open(full_name, O_CREAT | O_WRONLY, 0666);
	sprintf(pid, "%u\n", getpid());
	if (write(fd, pid, strlen(pid)) <= 0)
		die("Failed creating PID file");
	close(fd);
}

static void kvm__remove_pidfile(struct kvm *kvm)
{
	char full_name[PATH_MAX];

	if (!kvm->name)
		return;

	sprintf(full_name, "%s/%s/%s.pid", HOME_DIR, KVM_PID_FILE_PATH, kvm->name);
	unlink(full_name);
}

int kvm__get_pid_by_instance(const char *name)
{
	int fd, pid, nr;
	char pid_str[10], pid_file[PATH_MAX];

	sprintf(pid_file, "%s/%s/%s.pid", HOME_DIR, KVM_PID_FILE_PATH, name);
	fd = open(pid_file, O_RDONLY);
	if (fd < 0)
		return -1;

	nr = read(fd, pid_str, sizeof(pid_str) - 1);
	close(fd);
	if (nr <= 0)
		return -1;
	pid_str[nr] = '\0';

	pid = atoi(pid_str);
	if (pid < 0)
		return -1;

	return pid;
}

int kvm__enumerate_instances(void (*callback)(const char *name, int pid))
{
	char full_name[PATH_MAX];
	int pid;
	DIR *dir;
	struct dirent entry, *result;

	sprintf(full_name, "%s/%s", HOME_DIR, KVM_PID_FILE_PATH);
	dir = opendir(full_name);
	if (!dir)
		return -1;

	for (;;) {
		readdir_r(dir, &entry, &result);
		if (result == NULL)
			break;
		if (entry.d_type == DT_REG) {
			/* Strip the ".pid" suffix to recover the instance name */
			entry.d_name[strlen(entry.d_name) - 4] = 0;
			pid = kvm__get_pid_by_instance(entry.d_name);
			callback(entry.d_name, pid);
		}
	}

	closedir(dir);

	return 0;
}

void kvm__delete(struct kvm *kvm)
{
	kvm__stop_timer(kvm);

	munmap(kvm->ram_start, kvm->ram_size);
	kvm__remove_pidfile(kvm);
	free(kvm);
}

static bool kvm__cpu_supports_vm(void)
{
	struct cpuid_regs regs;
	u32 eax_base;
	int feature;

	regs = (struct cpuid_regs) {
		.eax = 0x00,
	};
	host_cpuid(&regs);

	switch (regs.ebx) {
	case CPUID_VENDOR_INTEL_1:
		eax_base = 0x00;
		feature  = KVM__X86_FEATURE_VMX;
		break;

	case CPUID_VENDOR_AMD_1:
		eax_base = 0x80000000;
		feature  = KVM__X86_FEATURE_SVM;
		break;

	default:
		return false;
	}

	regs = (struct cpuid_regs) {
		.eax = eax_base,
	};
	host_cpuid(&regs);

	if (regs.eax < eax_base + 0x01)
		return false;

	regs = (struct cpuid_regs) {
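		/*
		 * Feature leaf: CPUID 0x1 for Intel (VMX bit in ECX) or
		 * 0x80000001 for AMD (SVM bit in ECX).
		 */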
		.eax = eax_base + 0x01
	};
	host_cpuid(&regs);

	return regs.ecx & (1 << feature);
}

/*
 * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
 * memory regions to it. Therefore, be careful if you use this function for
 * registering memory regions for emulating hardware.
 */
void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem;
	int ret;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= kvm->mem_slots++,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}

/*
 * Allocating RAM size bigger than 4GB requires us to leave a gap
 * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
 * devices (see documentation of e820_setup_gap() for details).
 *
 * If we're required to initialize RAM bigger than 4GB, we will create
 * a gap between 0xe0000000 and 0x100000000 in the guest physical address space.
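 *
 * The resulting guest physical layout is then roughly:
 *
 *   0x000000000 - 0x0e0000000   RAM (first slot)
 *   0x0e0000000 - 0x100000000   PCI MMIO gap, no RAM mapped
 *   0x100000000 - end of RAM    RAM (second slot)
 *
 * (Addresses assume KVM_32BIT_GAP_START == 0xe0000000, as described above.)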
 */

void kvm__init_ram(struct kvm *kvm)
{
	u64 phys_start, phys_size;
	void *host_mem;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		/* Use a single block of RAM for 32bit RAM */

		phys_start = 0;
		phys_size  = kvm->ram_size;
		host_mem   = kvm->ram_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
	} else {
		/* First RAM range from zero to the PCI gap: */

		phys_start = 0;
		phys_size  = KVM_32BIT_GAP_START;
		host_mem   = kvm->ram_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);

		/* Second RAM range from 4GB to the end of RAM: */

		phys_start = 0x100000000ULL;
		phys_size  = kvm->ram_size - phys_size;
		host_mem   = kvm->ram_start + phys_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
	}
}

int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret < 0)
		die_perror("KVM_CAP_NR_VCPUS");

	return ret;
}

struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
{
	struct kvm_pit_config pit_config = { .flags = 0, };
	struct kvm *kvm;
	int ret;

	if (!kvm__cpu_supports_vm())
		die("Your CPU does not support hardware virtualization");

	kvm = kvm__new();

	kvm->sys_fd = open(kvm_dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
		if (errno == ENODEV)
			die("'%s' KVM driver not available.\n  # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);

		fprintf(stderr, "  Fatal, could not open %s: ", kvm_dev);
		perror(NULL);
		exit(1);
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(kvm))
		die("A required KVM extension is not supported by the OS");

	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
	if (ret < 0)
		die_perror("KVM_SET_TSS_ADDR ioctl");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
	if (ret < 0)
		die_perror("KVM_CREATE_PIT2 ioctl");

	kvm->ram_size = ram_size;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
	} else {
		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
		if (kvm->ram_start != MAP_FAILED) {
			/*
			 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
			 * if we accidentally write to it, we will know.
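			 * Any stray access into the [KVM_32BIT_GAP_START, 4GB) hole
			 * then faults immediately instead of silently landing in the
			 * range reserved for MMIO.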
			 */
			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
		}
	}
	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");

	kvm->name = name;

	kvm__create_pidfile(kvm);

	return kvm;
}

#define BOOT_LOADER_SELECTOR	0x1000
#define BOOT_LOADER_IP		0x0000
#define BOOT_LOADER_SP		0x8000
#define BOOT_CMDLINE_OFFSET	0x20000

#define BOOT_PROTOCOL_REQUIRED	0x206
#define LOAD_HIGH		0x01

static int load_flat_binary(struct kvm *kvm, int fd)
{
	void *p;
	int nr;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	while ((nr = read(fd, p, 65536)) > 0)
		p += nr;

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	kvm->boot_ip		= BOOT_LOADER_IP;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}

static const char *BZIMAGE_MAGIC = "HdrS";

static bool load_bzimage(struct kvm *kvm, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline, u16 vidmode)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	/*
	 * See Documentation/x86/boot.txt for details on bzImage on-disk and
	 * memory layout.
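	 *
	 * In short, as handled below: the first (setup_sects + 1) 512-byte
	 * sectors hold the real-mode setup code, which we copy to
	 * BOOT_LOADER_SELECTOR:BOOT_LOADER_IP; the rest of the image is the
	 * protected-mode kernel, which we copy to BZ_KERNEL_START. The guest
	 * is later entered at offset 0x200 of the setup code.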
	 */

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
		return false;

	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
		return false;

	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
		die("Kernel too old");

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	setup_sects = boot.hdr.setup_sects + 1;

	setup_size = setup_sects << 9;
	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to memory */
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START */
	p = guest_flat_to_host(kvm, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;
	kern_boot->hdr.vid_mode		= vidmode;

	/*
	 * Read initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (kvm->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

		p = guest_flat_to_host(kvm, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	kvm->boot_selector = BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	kvm->boot_ip = BOOT_LOADER_IP + 0x200;
	kvm->boot_sp = BOOT_LOADER_SP;

	return true;
}

/* RFC 1952 */
#define GZIP_ID1	0x1f
#define GZIP_ID2	0x8b

static bool initrd_check(int fd)
{
	unsigned char id[2];

	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
		return false;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	return id[0] == GZIP_ID1 && id[1] == GZIP_ID2;
}

bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);

		if (!initrd_check(fd_initrd))
			die("%s is not an initrd", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);

	if (initrd_filename)
		close(fd_initrd);

	if (ret)
		goto found_kernel;

	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd_kernel);

	return ret;
}

/**
 * kvm__setup_bios - inject BIOS into guest system memory
 * @kvm: guest system descriptor
 *
 * This is the main routine where we poke guest memory
 * and install the BIOS there.
 */
void kvm__setup_bios(struct kvm *kvm)
{
	/* standard minimal configuration */
	setup_bios(kvm);

	/* FIXME: SMP, ACPI and friends here */

	/* MP table */
	mptable_setup(kvm, kvm->nrcpus);
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that's used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Please note
 * that the clock interrupt, for example, is not handled here.
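 *
 * Mechanism: a POSIX timer armed at TIMER_INTERVAL_NS delivers SIGALRM to
 * the calling thread via SIGEV_THREAD_ID; the matching SIGALRM handler,
 * which does the actual injection, is installed elsewhere.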
 */
void kvm__start_timer(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_THREAD_ID;
	sev.sigev_signo			= SIGALRM;
	sev._sigev_un._tid		= syscall(__NR_gettid);

	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
		die("timer_create()");

	its.it_value.tv_sec	= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec	= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec	= its.it_value.tv_sec;
	its.it_interval.tv_nsec	= its.it_value.tv_nsec;

	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
		die("timer_settime()");
}

void kvm__stop_timer(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;
}

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;

	irq_level = (struct kvm_irq_level) {
		{
			.irq		= irq,
		},
		.level			= level,
	};

	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
		die_perror("KVM_IRQ_LINE failed");
}

void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* round down to a multiple of 8 */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
			p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}

void kvm__pause(void)
{
	int i, paused_vcpus = 0;

	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

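	/*
	 * Pause protocol: hold pause_lock so that VCPU threads block in
	 * kvm__notify_paused() after acknowledging, signal every VCPU with
	 * SIGKVMPAUSE, then wait on the eventfd until all of them have
	 * reported in. kvm__continue() releases pause_lock to resume them.
	 */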
	mutex_lock(&pause_lock);

	pause_event = eventfd(0, 0);
	if (pause_event < 0)
		die("Failed creating pause notification event");
	for (i = 0; i < kvm->nrcpus; i++)
		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);

	while (paused_vcpus < kvm->nrcpus) {
		u64 cur_read;

		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
			die("Failed reading pause event");
		paused_vcpus += cur_read;
	}
	close(pause_event);
}

void kvm__continue(void)
{
	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_unlock(&pause_lock);
}

void kvm__notify_paused(void)
{
	u64 p = 1;

	if (write(pause_event, &p, sizeof(p)) < 0)
		die("Failed notifying of paused VCPU.");

	mutex_lock(&pause_lock);
	mutex_unlock(&pause_lock);
}