1ae1fae34SPekka Enberg #include "kvm/kvm.h" 272811558SPekka Enberg #include "kvm/read-write.h" 372811558SPekka Enberg #include "kvm/util.h" 424ed52dbSCyrill Gorcunov #include "kvm/strbuf.h" 54298ddadSSasha Levin #include "kvm/mutex.h" 64298ddadSSasha Levin #include "kvm/kvm-cpu.h" 74b1addaeSSasha Levin #include "kvm/kvm-ipc.h" 8eda03319SPekka Enberg 9d82350d3SWill Deacon #include <linux/kernel.h> 106c7d8514SPekka Enberg #include <linux/kvm.h> 11d82350d3SWill Deacon #include <linux/list.h> 12495fbd4eSSasha Levin #include <linux/err.h> 13f5ab5f67SPekka Enberg 144b1addaeSSasha Levin #include <sys/un.h> 15e2e876c2SMatt Evans #include <sys/stat.h> 164b1addaeSSasha Levin #include <sys/types.h> 174b1addaeSSasha Levin #include <sys/socket.h> 18ae1fae34SPekka Enberg #include <sys/ioctl.h> 191f9cff23SPekka Enberg #include <sys/mman.h> 202da26a59SPekka Enberg #include <stdbool.h> 2106e41eeaSPekka Enberg #include <limits.h> 22ce79f1caSPekka Enberg #include <signal.h> 23f5ab5f67SPekka Enberg #include <stdarg.h> 24b8f6afcdSPekka Enberg #include <stdlib.h> 25f5ab5f67SPekka Enberg #include <string.h> 260d1f17ecSPekka Enberg #include <unistd.h> 271f9cff23SPekka Enberg #include <stdio.h> 28b8f6afcdSPekka Enberg #include <fcntl.h> 29ce79f1caSPekka Enberg #include <time.h> 304298ddadSSasha Levin #include <sys/eventfd.h> 31c7828731SSasha Levin #include <asm/unistd.h> 3263bc8503SSasha Levin #include <dirent.h> 33b8f6afcdSPekka Enberg 34ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason 350d1f17ecSPekka Enberg 36ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = { 37ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN), 38ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION), 39ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO), 40ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL), 41ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG), 42ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT), 43ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO), 44ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN), 45ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN), 46ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY), 47ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR), 48ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR), 49ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS), 50ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC), 51ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET), 52ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR), 53ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI), 54ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR), 5563e158a0SMatt Evans #ifdef CONFIG_PPC64 5663e158a0SMatt Evans DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL), 5763e158a0SMatt Evans #endif 589b1fb1c3SPekka Enberg }; 599b1fb1c3SPekka Enberg 604298ddadSSasha Levin static int pause_event; 614298ddadSSasha Levin static DEFINE_MUTEX(pause_lock); 62af7b0868SMatt Evans extern struct kvm_ext kvm_req_ext[]; 634298ddadSSasha Levin 649667701cSPekka Enberg static char kvm_dir[PATH_MAX]; 659667701cSPekka Enberg 66495fbd4eSSasha Levin static int set_dir(const char *fmt, va_list args) 679667701cSPekka Enberg { 68dd188f9fSPekka Enberg char tmp[PATH_MAX]; 69dd188f9fSPekka Enberg 70dd188f9fSPekka Enberg vsnprintf(tmp, sizeof(tmp), fmt, args); 71dd188f9fSPekka Enberg 722bc995fbSPekka Enberg mkdir(tmp, 0777); 732bc995fbSPekka Enberg 74dd188f9fSPekka Enberg if (!realpath(tmp, kvm_dir)) 75495fbd4eSSasha Levin return -errno; 76f76a3285SPekka Enberg 77f76a3285SPekka Enberg strcat(kvm_dir, "/"); 78495fbd4eSSasha Levin 79495fbd4eSSasha Levin return 0; 809667701cSPekka Enberg } 819667701cSPekka Enberg 829667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...) 839667701cSPekka Enberg { 849667701cSPekka Enberg va_list args; 859667701cSPekka Enberg 869667701cSPekka Enberg va_start(args, fmt); 879667701cSPekka Enberg set_dir(fmt, args); 889667701cSPekka Enberg va_end(args); 899667701cSPekka Enberg } 909667701cSPekka Enberg 919667701cSPekka Enberg const char *kvm__get_dir(void) 929667701cSPekka Enberg { 939667701cSPekka Enberg return kvm_dir; 949667701cSPekka Enberg } 959667701cSPekka Enberg 96663165a2SAndre Przywara bool kvm__supports_vm_extension(struct kvm *kvm, unsigned int extension) 97663165a2SAndre Przywara { 98663165a2SAndre Przywara static int supports_vm_ext_check = 0; 99663165a2SAndre Przywara int ret; 100663165a2SAndre Przywara 101663165a2SAndre Przywara switch (supports_vm_ext_check) { 102663165a2SAndre Przywara case 0: 103663165a2SAndre Przywara ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, 104663165a2SAndre Przywara KVM_CAP_CHECK_EXTENSION_VM); 105663165a2SAndre Przywara if (ret <= 0) { 106663165a2SAndre Przywara supports_vm_ext_check = -1; 107663165a2SAndre Przywara return false; 108663165a2SAndre Przywara } 109663165a2SAndre Przywara supports_vm_ext_check = 1; 110663165a2SAndre Przywara /* fall through */ 111663165a2SAndre Przywara case 1: 112663165a2SAndre Przywara break; 113663165a2SAndre Przywara case -1: 114663165a2SAndre Przywara return false; 115663165a2SAndre Przywara } 116663165a2SAndre Przywara 117663165a2SAndre Przywara ret = ioctl(kvm->vm_fd, KVM_CHECK_EXTENSION, extension); 118663165a2SAndre Przywara if (ret < 0) 119663165a2SAndre Przywara return false; 120663165a2SAndre Przywara 121663165a2SAndre Przywara return ret; 122663165a2SAndre Przywara } 123663165a2SAndre Przywara 1241d6fb3f2SSasha Levin bool kvm__supports_extension(struct kvm *kvm, unsigned int extension) 125b8f6afcdSPekka Enberg { 12628fa19c0SPekka Enberg int ret; 127b8f6afcdSPekka Enberg 12843835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension); 1294076b041SPekka Enberg if (ret < 0) 1304076b041SPekka Enberg return false; 1314076b041SPekka Enberg 1324076b041SPekka Enberg return ret; 1334076b041SPekka Enberg } 1344076b041SPekka Enberg 13543835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm) 13655e19624SCyrill Gorcunov { 137495fbd4eSSasha Levin int i; 13855e19624SCyrill Gorcunov 139af7b0868SMatt Evans for (i = 0; ; i++) { 140af7b0868SMatt Evans if (!kvm_req_ext[i].name) 141af7b0868SMatt Evans break; 14243835ac9SSasha Levin if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) { 14350687d87SWill Deacon pr_err("Unsupported KVM extension detected: %s", 14455e19624SCyrill Gorcunov kvm_req_ext[i].name); 145495fbd4eSSasha Levin return -i; 14655e19624SCyrill Gorcunov } 14755e19624SCyrill Gorcunov } 14855e19624SCyrill Gorcunov 14955e19624SCyrill Gorcunov return 0; 15055e19624SCyrill Gorcunov } 15155e19624SCyrill Gorcunov 15247621338SSasha Levin struct kvm *kvm__new(void) 1534076b041SPekka Enberg { 154495fbd4eSSasha Levin struct kvm *kvm = calloc(1, sizeof(*kvm)); 15543835ac9SSasha Levin if (!kvm) 156495fbd4eSSasha Levin return ERR_PTR(-ENOMEM); 1574076b041SPekka Enberg 158d648dbf5SCyrill Gorcunov kvm->sys_fd = -1; 159d648dbf5SCyrill Gorcunov kvm->vm_fd = -1; 160d648dbf5SCyrill Gorcunov 16143835ac9SSasha Levin return kvm; 1624076b041SPekka Enberg } 1634076b041SPekka Enberg 164495fbd4eSSasha Levin int kvm__exit(struct kvm *kvm) 1659ef4c68eSPekka Enberg { 166d82350d3SWill Deacon struct kvm_mem_bank *bank, *tmp; 167495fbd4eSSasha Levin 168d82350d3SWill Deacon kvm__arch_delete_ram(kvm); 169d82350d3SWill Deacon 170d82350d3SWill Deacon list_for_each_entry_safe(bank, tmp, &kvm->mem_banks, list) { 171d82350d3SWill Deacon list_del(&bank->list); 172d82350d3SWill Deacon free(bank); 173d82350d3SWill Deacon } 174d82350d3SWill Deacon 175d82350d3SWill Deacon free(kvm); 176495fbd4eSSasha Levin return 0; 1779ef4c68eSPekka Enberg } 17849a8afd1SSasha Levin core_exit(kvm__exit); 1799ef4c68eSPekka Enberg 18096feb589SPekka Enberg /* 18196feb589SPekka Enberg * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping 18296feb589SPekka Enberg * memory regions to it. Therefore, be careful if you use this function for 18396feb589SPekka Enberg * registering memory regions for emulating hardware. 18496feb589SPekka Enberg */ 185*8f46c736SJean-Philippe Brucker int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, 186*8f46c736SJean-Philippe Brucker void *userspace_addr, enum kvm_mem_type type) 1874076b041SPekka Enberg { 1882b0e3342SPekka Enberg struct kvm_userspace_memory_region mem; 189d82350d3SWill Deacon struct kvm_mem_bank *bank; 190839051d9SSasha Levin int ret; 191839051d9SSasha Levin 192d82350d3SWill Deacon bank = malloc(sizeof(*bank)); 193d82350d3SWill Deacon if (!bank) 194d82350d3SWill Deacon return -ENOMEM; 195d82350d3SWill Deacon 196d82350d3SWill Deacon INIT_LIST_HEAD(&bank->list); 197d82350d3SWill Deacon bank->guest_phys_addr = guest_phys; 198d82350d3SWill Deacon bank->host_addr = userspace_addr; 199d82350d3SWill Deacon bank->size = size; 200*8f46c736SJean-Philippe Brucker bank->type = type; 201d82350d3SWill Deacon 202839051d9SSasha Levin mem = (struct kvm_userspace_memory_region) { 20396feb589SPekka Enberg .slot = kvm->mem_slots++, 204874467f8SSasha Levin .guest_phys_addr = guest_phys, 205874467f8SSasha Levin .memory_size = size, 206c4acb611SIngo Molnar .userspace_addr = (unsigned long)userspace_addr, 207839051d9SSasha Levin }; 208839051d9SSasha Levin 209874467f8SSasha Levin ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem); 210839051d9SSasha Levin if (ret < 0) 211495fbd4eSSasha Levin return -errno; 212495fbd4eSSasha Levin 213d82350d3SWill Deacon list_add(&bank->list, &kvm->mem_banks); 214495fbd4eSSasha Levin return 0; 215839051d9SSasha Levin } 216839051d9SSasha Levin 217f412251fSWill Deacon void *guest_flat_to_host(struct kvm *kvm, u64 offset) 218f412251fSWill Deacon { 219f412251fSWill Deacon struct kvm_mem_bank *bank; 220f412251fSWill Deacon 221f412251fSWill Deacon list_for_each_entry(bank, &kvm->mem_banks, list) { 222f412251fSWill Deacon u64 bank_start = bank->guest_phys_addr; 223f412251fSWill Deacon u64 bank_end = bank_start + bank->size; 224f412251fSWill Deacon 225f412251fSWill Deacon if (offset >= bank_start && offset < bank_end) 226f412251fSWill Deacon return bank->host_addr + (offset - bank_start); 227f412251fSWill Deacon } 228f412251fSWill Deacon 229f412251fSWill Deacon pr_warning("unable to translate guest address 0x%llx to host", 230f412251fSWill Deacon (unsigned long long)offset); 231f412251fSWill Deacon return NULL; 232f412251fSWill Deacon } 233f412251fSWill Deacon 2340cb41990SWill Deacon u64 host_to_guest_flat(struct kvm *kvm, void *ptr) 2350cb41990SWill Deacon { 2360cb41990SWill Deacon struct kvm_mem_bank *bank; 2370cb41990SWill Deacon 2380cb41990SWill Deacon list_for_each_entry(bank, &kvm->mem_banks, list) { 2390cb41990SWill Deacon void *bank_start = bank->host_addr; 2400cb41990SWill Deacon void *bank_end = bank_start + bank->size; 2410cb41990SWill Deacon 2420cb41990SWill Deacon if (ptr >= bank_start && ptr < bank_end) 2430cb41990SWill Deacon return bank->guest_phys_addr + (ptr - bank_start); 2440cb41990SWill Deacon } 2450cb41990SWill Deacon 2460cb41990SWill Deacon pr_warning("unable to translate host address %p to guest", ptr); 2470cb41990SWill Deacon return 0; 2480cb41990SWill Deacon } 2490cb41990SWill Deacon 250*8f46c736SJean-Philippe Brucker /* 251*8f46c736SJean-Philippe Brucker * Iterate over each registered memory bank. Call @fun for each bank with @data 252*8f46c736SJean-Philippe Brucker * as argument. @type is a bitmask that allows to filter banks according to 253*8f46c736SJean-Philippe Brucker * their type. 254*8f46c736SJean-Philippe Brucker * 255*8f46c736SJean-Philippe Brucker * If one call to @fun returns a non-zero value, stop iterating and return the 256*8f46c736SJean-Philippe Brucker * value. Otherwise, return zero. 257*8f46c736SJean-Philippe Brucker */ 258*8f46c736SJean-Philippe Brucker int kvm__for_each_mem_bank(struct kvm *kvm, enum kvm_mem_type type, 259*8f46c736SJean-Philippe Brucker int (*fun)(struct kvm *kvm, struct kvm_mem_bank *bank, void *data), 260*8f46c736SJean-Philippe Brucker void *data) 261*8f46c736SJean-Philippe Brucker { 262*8f46c736SJean-Philippe Brucker int ret; 263*8f46c736SJean-Philippe Brucker struct kvm_mem_bank *bank; 264*8f46c736SJean-Philippe Brucker 265*8f46c736SJean-Philippe Brucker list_for_each_entry(bank, &kvm->mem_banks, list) { 266*8f46c736SJean-Philippe Brucker if (type != KVM_MEM_TYPE_ALL && !(bank->type & type)) 267*8f46c736SJean-Philippe Brucker continue; 268*8f46c736SJean-Philippe Brucker 269*8f46c736SJean-Philippe Brucker ret = fun(kvm, bank, data); 270*8f46c736SJean-Philippe Brucker if (ret) 271*8f46c736SJean-Philippe Brucker break; 272*8f46c736SJean-Philippe Brucker } 273*8f46c736SJean-Philippe Brucker 274*8f46c736SJean-Philippe Brucker return ret; 275*8f46c736SJean-Philippe Brucker } 276*8f46c736SJean-Philippe Brucker 2778259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm) 278384922b3SPekka Enberg { 279384922b3SPekka Enberg int ret; 280384922b3SPekka Enberg 28143835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS); 2828259b8ccSSasha Levin if (ret <= 0) 2833b9b691dSMatt Evans /* 2843b9b691dSMatt Evans * api.txt states that if KVM_CAP_NR_VCPUS does not exist, 2853b9b691dSMatt Evans * assume 4. 2863b9b691dSMatt Evans */ 2873b9b691dSMatt Evans return 4; 288384922b3SPekka Enberg 289384922b3SPekka Enberg return ret; 290384922b3SPekka Enberg } 291384922b3SPekka Enberg 2928259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm) 2938259b8ccSSasha Levin { 2948259b8ccSSasha Levin int ret; 2958259b8ccSSasha Levin 2968259b8ccSSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS); 2978259b8ccSSasha Levin if (ret <= 0) 2988259b8ccSSasha Levin ret = kvm__recommended_cpus(kvm); 2998259b8ccSSasha Levin 3008259b8ccSSasha Levin return ret; 3018259b8ccSSasha Levin } 3028259b8ccSSasha Levin 30347621338SSasha Levin int kvm__init(struct kvm *kvm) 304839051d9SSasha Levin { 3054076b041SPekka Enberg int ret; 3064076b041SPekka Enberg 307495fbd4eSSasha Levin if (!kvm__arch_cpu_supports_vm()) { 308495fbd4eSSasha Levin pr_err("Your CPU does not support hardware virtualization"); 3096fce7105SYang Bai ret = -ENOSYS; 3106fce7105SYang Bai goto err; 311495fbd4eSSasha Levin } 312c78b8713SAsias He 31347621338SSasha Levin kvm->sys_fd = open(kvm->cfg.dev, O_RDWR); 31443835ac9SSasha Levin if (kvm->sys_fd < 0) { 315d648dbf5SCyrill Gorcunov if (errno == ENOENT) 316495fbd4eSSasha Levin pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM " 31747621338SSasha Levin "enabled and that the KVM modules are loaded.", kvm->cfg.dev); 318d648dbf5SCyrill Gorcunov else if (errno == ENODEV) 319d648dbf5SCyrill Gorcunov pr_err("'%s' KVM driver not available.\n # (If the KVM " 320495fbd4eSSasha Levin "module is loaded then 'dmesg' may offer further clues " 32147621338SSasha Levin "about the failure.)", kvm->cfg.dev); 322d648dbf5SCyrill Gorcunov else 32347621338SSasha Levin pr_err("Could not open %s: ", kvm->cfg.dev); 324d648dbf5SCyrill Gorcunov 325495fbd4eSSasha Levin ret = -errno; 326d648dbf5SCyrill Gorcunov goto err_free; 3276d7c36ceSPekka Enberg } 328b8f6afcdSPekka Enberg 32943835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0); 330495fbd4eSSasha Levin if (ret != KVM_API_VERSION) { 331495fbd4eSSasha Levin pr_err("KVM_API_VERSION ioctl"); 332495fbd4eSSasha Levin ret = -errno; 333d648dbf5SCyrill Gorcunov goto err_sys_fd; 334495fbd4eSSasha Levin } 3356c7d8514SPekka Enberg 336b5a5cd67SAndreas Herrmann kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, KVM_VM_TYPE); 337495fbd4eSSasha Levin if (kvm->vm_fd < 0) { 33881404cdbSDavid Daney pr_err("KVM_CREATE_VM ioctl"); 339495fbd4eSSasha Levin ret = kvm->vm_fd; 340d648dbf5SCyrill Gorcunov goto err_sys_fd; 341495fbd4eSSasha Levin } 34228fa19c0SPekka Enberg 343495fbd4eSSasha Levin if (kvm__check_extensions(kvm)) { 3445f5b0144SWill Deacon pr_err("A required KVM extension is not supported by OS"); 345495fbd4eSSasha Levin ret = -ENOSYS; 3466fce7105SYang Bai goto err_vm_fd; 347495fbd4eSSasha Levin } 3489687927dSAsias He 34947621338SSasha Levin kvm__arch_init(kvm, kvm->cfg.hugetlbfs_path, kvm->cfg.ram_size); 3509687927dSAsias He 351d82350d3SWill Deacon INIT_LIST_HEAD(&kvm->mem_banks); 352abee258bSSasha Levin kvm__init_ram(kvm); 353abee258bSSasha Levin 354084a1356SSasha Levin if (!kvm->cfg.firmware_filename) { 355084a1356SSasha Levin if (!kvm__load_kernel(kvm, kvm->cfg.kernel_filename, 356ff7ba6faSWill Deacon kvm->cfg.initrd_filename, kvm->cfg.real_cmdline)) 357084a1356SSasha Levin die("unable to load kernel %s", kvm->cfg.kernel_filename); 358084a1356SSasha Levin } 359084a1356SSasha Levin 360084a1356SSasha Levin if (kvm->cfg.firmware_filename) { 361084a1356SSasha Levin if (!kvm__load_firmware(kvm, kvm->cfg.firmware_filename)) 362084a1356SSasha Levin die("unable to load firmware image %s: %s", kvm->cfg.firmware_filename, strerror(errno)); 363084a1356SSasha Levin } else { 364084a1356SSasha Levin ret = kvm__arch_setup_firmware(kvm); 365084a1356SSasha Levin if (ret < 0) 366084a1356SSasha Levin die("kvm__arch_setup_firmware() failed with error %d\n", ret); 367084a1356SSasha Levin } 368084a1356SSasha Levin 36947621338SSasha Levin return 0; 370d648dbf5SCyrill Gorcunov 3716fce7105SYang Bai err_vm_fd: 372495fbd4eSSasha Levin close(kvm->vm_fd); 373d648dbf5SCyrill Gorcunov err_sys_fd: 374495fbd4eSSasha Levin close(kvm->sys_fd); 375d648dbf5SCyrill Gorcunov err_free: 376495fbd4eSSasha Levin free(kvm); 3776fce7105SYang Bai err: 37847621338SSasha Levin return ret; 3794076b041SPekka Enberg } 38049a8afd1SSasha Levin core_init(kvm__init); 3814076b041SPekka Enberg 38272811558SPekka Enberg /* RFC 1952 */ 38372811558SPekka Enberg #define GZIP_ID1 0x1f 38472811558SPekka Enberg #define GZIP_ID2 0x8b 385663ce1dfSMatt Evans #define CPIO_MAGIC "0707" 386663ce1dfSMatt Evans /* initrd may be gzipped, or a plain cpio */ 38772811558SPekka Enberg static bool initrd_check(int fd) 38872811558SPekka Enberg { 389663ce1dfSMatt Evans unsigned char id[4]; 39072811558SPekka Enberg 39172811558SPekka Enberg if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0) 39272811558SPekka Enberg return false; 39372811558SPekka Enberg 39472811558SPekka Enberg if (lseek(fd, 0, SEEK_SET) < 0) 39572811558SPekka Enberg die_perror("lseek"); 39672811558SPekka Enberg 397663ce1dfSMatt Evans return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) || 398663ce1dfSMatt Evans !memcmp(id, CPIO_MAGIC, 4); 39972811558SPekka Enberg } 40072811558SPekka Enberg 4016d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename, 402ff7ba6faSWill Deacon const char *initrd_filename, const char *kernel_cmdline) 403ae1fae34SPekka Enberg { 4047fb218bdSPekka Enberg bool ret; 4052065a6f7SCyrill Gorcunov int fd_kernel = -1, fd_initrd = -1; 406ae1fae34SPekka Enberg 4072065a6f7SCyrill Gorcunov fd_kernel = open(kernel_filename, O_RDONLY); 4082065a6f7SCyrill Gorcunov if (fd_kernel < 0) 4090b62d2bbSPekka Enberg die("Unable to open kernel %s", kernel_filename); 410ae1fae34SPekka Enberg 4112065a6f7SCyrill Gorcunov if (initrd_filename) { 4122065a6f7SCyrill Gorcunov fd_initrd = open(initrd_filename, O_RDONLY); 4132065a6f7SCyrill Gorcunov if (fd_initrd < 0) 4140b62d2bbSPekka Enberg die("Unable to open initrd %s", initrd_filename); 41572811558SPekka Enberg 41672811558SPekka Enberg if (!initrd_check(fd_initrd)) 41772811558SPekka Enberg die("%s is not an initrd", initrd_filename); 4182065a6f7SCyrill Gorcunov } 4192065a6f7SCyrill Gorcunov 420004f7684SAndre Przywara ret = kvm__arch_load_kernel_image(kvm, fd_kernel, fd_initrd, 421004f7684SAndre Przywara kernel_cmdline); 422009b0758SPekka Enberg 423604dbd63SMatt Evans if (initrd_filename) 424604dbd63SMatt Evans close(fd_initrd); 4255a6ac675SSasha Levin close(fd_kernel); 4265a6ac675SSasha Levin 427004f7684SAndre Przywara if (!ret) 428004f7684SAndre Przywara die("%s is not a valid kernel image", kernel_filename); 429ae1fae34SPekka Enberg return ret; 430ae1fae34SPekka Enberg } 431ae1fae34SPekka Enberg 432b2cf1e9fSAsias He void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size, int debug_fd) 433090f898eSCyrill Gorcunov { 434090f898eSCyrill Gorcunov unsigned char *p; 435090f898eSCyrill Gorcunov unsigned long n; 436090f898eSCyrill Gorcunov 437090f898eSCyrill Gorcunov size &= ~7; /* mod 8 */ 438090f898eSCyrill Gorcunov if (!size) 439090f898eSCyrill Gorcunov return; 440090f898eSCyrill Gorcunov 44143835ac9SSasha Levin p = guest_flat_to_host(kvm, addr); 442090f898eSCyrill Gorcunov 44348cf3877SPekka Enberg for (n = 0; n < size; n += 8) { 444b2cf1e9fSAsias He if (!host_ptr_in_ram(kvm, p + n)) { 445b2cf1e9fSAsias He dprintf(debug_fd, " 0x%08lx: <unknown>\n", addr + n); 446b2cf1e9fSAsias He continue; 447b2cf1e9fSAsias He } 448b2cf1e9fSAsias He dprintf(debug_fd, " 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n", 449090f898eSCyrill Gorcunov addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3], 450090f898eSCyrill Gorcunov p[n + 4], p[n + 5], p[n + 6], p[n + 7]); 451090f898eSCyrill Gorcunov } 45248cf3877SPekka Enberg } 4534298ddadSSasha Levin 4542aa76b26SWill Deacon void kvm__reboot(struct kvm *kvm) 4552aa76b26SWill Deacon { 4562aa76b26SWill Deacon /* Check if the guest is running */ 4572aa76b26SWill Deacon if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0) 4582aa76b26SWill Deacon return; 4592aa76b26SWill Deacon 460e8cb90fbSWill Deacon pthread_kill(kvm->cpus[0]->thread, SIGKVMEXIT); 4612aa76b26SWill Deacon } 4622aa76b26SWill Deacon 463e8cb90fbSWill Deacon void kvm__continue(struct kvm *kvm) 464e8cb90fbSWill Deacon { 4652aa76b26SWill Deacon mutex_unlock(&pause_lock); 4662aa76b26SWill Deacon } 4672aa76b26SWill Deacon 4684346fd8fSSasha Levin void kvm__pause(struct kvm *kvm) 4694298ddadSSasha Levin { 4704298ddadSSasha Levin int i, paused_vcpus = 0; 4714298ddadSSasha Levin 472e8cb90fbSWill Deacon mutex_lock(&pause_lock); 473e8cb90fbSWill Deacon 4744298ddadSSasha Levin /* Check if the guest is running */ 47537b8e06bSJean-Philippe Brucker if (!kvm->cpus || !kvm->cpus[0] || kvm->cpus[0]->thread == 0) 4764298ddadSSasha Levin return; 4774298ddadSSasha Levin 4784298ddadSSasha Levin pause_event = eventfd(0, 0); 4794298ddadSSasha Levin if (pause_event < 0) 4804298ddadSSasha Levin die("Failed creating pause notification event"); 4812aa76b26SWill Deacon for (i = 0; i < kvm->nrcpus; i++) { 4822aa76b26SWill Deacon if (kvm->cpus[i]->is_running) 483df4239fbSSasha Levin pthread_kill(kvm->cpus[i]->thread, SIGKVMPAUSE); 4842aa76b26SWill Deacon else 4852aa76b26SWill Deacon paused_vcpus++; 4862aa76b26SWill Deacon } 4874298ddadSSasha Levin 4884298ddadSSasha Levin while (paused_vcpus < kvm->nrcpus) { 4894298ddadSSasha Levin u64 cur_read; 4904298ddadSSasha Levin 4914298ddadSSasha Levin if (read(pause_event, &cur_read, sizeof(cur_read)) < 0) 4924298ddadSSasha Levin die("Failed reading pause event"); 4934298ddadSSasha Levin paused_vcpus += cur_read; 4944298ddadSSasha Levin } 4954298ddadSSasha Levin close(pause_event); 4964298ddadSSasha Levin } 4974298ddadSSasha Levin 4984298ddadSSasha Levin void kvm__notify_paused(void) 4994298ddadSSasha Levin { 5004298ddadSSasha Levin u64 p = 1; 5014298ddadSSasha Levin 5024298ddadSSasha Levin if (write(pause_event, &p, sizeof(p)) < 0) 5034298ddadSSasha Levin die("Failed notifying of paused VCPU."); 5044298ddadSSasha Levin 5054298ddadSSasha Levin mutex_lock(&pause_lock); 5064298ddadSSasha Levin mutex_unlock(&pause_lock); 5074298ddadSSasha Levin } 508