1ae1fae34SPekka Enberg #include "kvm/kvm.h" 272811558SPekka Enberg #include "kvm/read-write.h" 372811558SPekka Enberg #include "kvm/util.h" 424ed52dbSCyrill Gorcunov #include "kvm/strbuf.h" 54298ddadSSasha Levin #include "kvm/mutex.h" 64298ddadSSasha Levin #include "kvm/kvm-cpu.h" 74b1addaeSSasha Levin #include "kvm/kvm-ipc.h" 8eda03319SPekka Enberg 9d82350d3SWill Deacon #include <linux/kernel.h> 106c7d8514SPekka Enberg #include <linux/kvm.h> 11d82350d3SWill Deacon #include <linux/list.h> 12495fbd4eSSasha Levin #include <linux/err.h> 13f5ab5f67SPekka Enberg 144b1addaeSSasha Levin #include <sys/un.h> 15e2e876c2SMatt Evans #include <sys/stat.h> 164b1addaeSSasha Levin #include <sys/types.h> 174b1addaeSSasha Levin #include <sys/socket.h> 18ae1fae34SPekka Enberg #include <sys/ioctl.h> 191f9cff23SPekka Enberg #include <sys/mman.h> 202da26a59SPekka Enberg #include <stdbool.h> 2106e41eeaSPekka Enberg #include <limits.h> 22ce79f1caSPekka Enberg #include <signal.h> 23f5ab5f67SPekka Enberg #include <stdarg.h> 24b8f6afcdSPekka Enberg #include <stdlib.h> 25f5ab5f67SPekka Enberg #include <string.h> 260d1f17ecSPekka Enberg #include <unistd.h> 271f9cff23SPekka Enberg #include <stdio.h> 28b8f6afcdSPekka Enberg #include <fcntl.h> 29ce79f1caSPekka Enberg #include <time.h> 304298ddadSSasha Levin #include <sys/eventfd.h> 31c7828731SSasha Levin #include <asm/unistd.h> 3263bc8503SSasha Levin #include <dirent.h> 33b8f6afcdSPekka Enberg 34ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason 350d1f17ecSPekka Enberg 36ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = { 37ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN), 38ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION), 39ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO), 40ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL), 41ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG), 42ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT), 43ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO), 44ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN), 45ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN), 46ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY), 47ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR), 48ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR), 49ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS), 50ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC), 51ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET), 52ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR), 53ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI), 54ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR), 5563e158a0SMatt Evans #ifdef CONFIG_PPC64 5663e158a0SMatt Evans DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL), 5763e158a0SMatt Evans #endif 589b1fb1c3SPekka Enberg }; 599b1fb1c3SPekka Enberg 604298ddadSSasha Levin static int pause_event; 614298ddadSSasha Levin static DEFINE_MUTEX(pause_lock); 62af7b0868SMatt Evans extern struct kvm_ext kvm_req_ext[]; 634298ddadSSasha Levin 649667701cSPekka Enberg static char kvm_dir[PATH_MAX]; 659667701cSPekka Enberg 6629f4ec31SJulien Thierry extern __thread struct kvm_cpu *current_kvm_cpu; 6729f4ec31SJulien Thierry 68495fbd4eSSasha Levin static int set_dir(const char *fmt, va_list args) 699667701cSPekka Enberg { 70dd188f9fSPekka Enberg char tmp[PATH_MAX]; 71dd188f9fSPekka Enberg 72dd188f9fSPekka Enberg vsnprintf(tmp, sizeof(tmp), fmt, args); 73dd188f9fSPekka Enberg 742bc995fbSPekka Enberg mkdir(tmp, 0777); 752bc995fbSPekka Enberg 76dd188f9fSPekka Enberg if (!realpath(tmp, kvm_dir)) 77495fbd4eSSasha Levin return -errno; 78f76a3285SPekka Enberg 79f76a3285SPekka Enberg strcat(kvm_dir, "/"); 80495fbd4eSSasha Levin 81495fbd4eSSasha Levin return 0; 829667701cSPekka Enberg } 839667701cSPekka Enberg 849667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...) 859667701cSPekka Enberg { 869667701cSPekka Enberg va_list args; 879667701cSPekka Enberg 889667701cSPekka Enberg va_start(args, fmt); 899667701cSPekka Enberg set_dir(fmt, args); 909667701cSPekka Enberg va_end(args); 919667701cSPekka Enberg } 929667701cSPekka Enberg 939667701cSPekka Enberg const char *kvm__get_dir(void) 949667701cSPekka Enberg { 959667701cSPekka Enberg return kvm_dir; 969667701cSPekka Enberg } 979667701cSPekka Enberg 98663165a2SAndre Przywara bool kvm__supports_vm_extension(struct kvm *kvm, unsigned int extension) 99663165a2SAndre Przywara { 100663165a2SAndre Przywara static int supports_vm_ext_check = 0; 101663165a2SAndre Przywara int ret; 102663165a2SAndre Przywara 103663165a2SAndre Przywara switch (supports_vm_ext_check) { 104663165a2SAndre Przywara case 0: 105663165a2SAndre Przywara ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, 106663165a2SAndre Przywara KVM_CAP_CHECK_EXTENSION_VM); 107663165a2SAndre Przywara if (ret <= 0) { 108663165a2SAndre Przywara supports_vm_ext_check = -1; 109663165a2SAndre Przywara return false; 110663165a2SAndre Przywara } 111663165a2SAndre Przywara supports_vm_ext_check = 1; 112663165a2SAndre Przywara /* fall through */ 113663165a2SAndre Przywara case 1: 114663165a2SAndre Przywara break; 115663165a2SAndre Przywara case -1: 116663165a2SAndre Przywara return false; 117663165a2SAndre Przywara } 118663165a2SAndre Przywara 119663165a2SAndre Przywara ret = ioctl(kvm->vm_fd, KVM_CHECK_EXTENSION, extension); 120663165a2SAndre Przywara if (ret < 0) 121663165a2SAndre Przywara return false; 122663165a2SAndre Przywara 123663165a2SAndre Przywara return ret; 124663165a2SAndre Przywara } 125663165a2SAndre Przywara 1261d6fb3f2SSasha Levin bool kvm__supports_extension(struct kvm *kvm, unsigned int extension) 127b8f6afcdSPekka Enberg { 12828fa19c0SPekka Enberg int ret; 129b8f6afcdSPekka Enberg 13043835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension); 1314076b041SPekka Enberg if (ret < 0) 1324076b041SPekka Enberg return false; 1334076b041SPekka Enberg 1344076b041SPekka Enberg return ret; 1354076b041SPekka Enberg } 1364076b041SPekka Enberg 13743835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm) 13855e19624SCyrill Gorcunov { 139495fbd4eSSasha Levin int i; 14055e19624SCyrill Gorcunov 141af7b0868SMatt Evans for (i = 0; ; i++) { 142af7b0868SMatt Evans if (!kvm_req_ext[i].name) 143af7b0868SMatt Evans break; 14443835ac9SSasha Levin if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) { 14550687d87SWill Deacon pr_err("Unsupported KVM extension detected: %s", 14655e19624SCyrill Gorcunov kvm_req_ext[i].name); 147495fbd4eSSasha Levin return -i; 14855e19624SCyrill Gorcunov } 14955e19624SCyrill Gorcunov } 15055e19624SCyrill Gorcunov 15155e19624SCyrill Gorcunov return 0; 15255e19624SCyrill Gorcunov } 15355e19624SCyrill Gorcunov 15447621338SSasha Levin struct kvm *kvm__new(void) 1554076b041SPekka Enberg { 156495fbd4eSSasha Levin struct kvm *kvm = calloc(1, sizeof(*kvm)); 15743835ac9SSasha Levin if (!kvm) 158495fbd4eSSasha Levin return ERR_PTR(-ENOMEM); 1594076b041SPekka Enberg 160*8d987725SAlexandru Elisei mutex_init(&kvm->mem_banks_lock); 161d648dbf5SCyrill Gorcunov kvm->sys_fd = -1; 162d648dbf5SCyrill Gorcunov kvm->vm_fd = -1; 163d648dbf5SCyrill Gorcunov 16420b65266SJulien Thierry #ifdef KVM_BRLOCK_DEBUG 16520b65266SJulien Thierry kvm->brlock_sem = (pthread_rwlock_t) PTHREAD_RWLOCK_INITIALIZER; 16620b65266SJulien Thierry #endif 16720b65266SJulien Thierry 16843835ac9SSasha Levin return kvm; 1694076b041SPekka Enberg } 1704076b041SPekka Enberg 171495fbd4eSSasha Levin int kvm__exit(struct kvm *kvm) 1729ef4c68eSPekka Enberg { 173d82350d3SWill Deacon struct kvm_mem_bank *bank, *tmp; 174495fbd4eSSasha Levin 175d82350d3SWill Deacon kvm__arch_delete_ram(kvm); 176d82350d3SWill Deacon 177d82350d3SWill Deacon list_for_each_entry_safe(bank, tmp, &kvm->mem_banks, list) { 178d82350d3SWill Deacon list_del(&bank->list); 179d82350d3SWill Deacon free(bank); 180d82350d3SWill Deacon } 181d82350d3SWill Deacon 182d82350d3SWill Deacon free(kvm); 183495fbd4eSSasha Levin return 0; 1849ef4c68eSPekka Enberg } 18549a8afd1SSasha Levin core_exit(kvm__exit); 1869ef4c68eSPekka Enberg 187*8d987725SAlexandru Elisei int kvm__destroy_mem(struct kvm *kvm, u64 guest_phys, u64 size, 188*8d987725SAlexandru Elisei void *userspace_addr) 189*8d987725SAlexandru Elisei { 190*8d987725SAlexandru Elisei struct kvm_userspace_memory_region mem; 191*8d987725SAlexandru Elisei struct kvm_mem_bank *bank; 192*8d987725SAlexandru Elisei int ret; 193*8d987725SAlexandru Elisei 194*8d987725SAlexandru Elisei mutex_lock(&kvm->mem_banks_lock); 195*8d987725SAlexandru Elisei list_for_each_entry(bank, &kvm->mem_banks, list) 196*8d987725SAlexandru Elisei if (bank->guest_phys_addr == guest_phys && 197*8d987725SAlexandru Elisei bank->size == size && bank->host_addr == userspace_addr) 198*8d987725SAlexandru Elisei break; 199*8d987725SAlexandru Elisei 200*8d987725SAlexandru Elisei if (&bank->list == &kvm->mem_banks) { 201*8d987725SAlexandru Elisei pr_err("Region [%llx-%llx] not found", guest_phys, 202*8d987725SAlexandru Elisei guest_phys + size - 1); 203*8d987725SAlexandru Elisei ret = -EINVAL; 204*8d987725SAlexandru Elisei goto out; 205*8d987725SAlexandru Elisei } 206*8d987725SAlexandru Elisei 207*8d987725SAlexandru Elisei if (bank->type == KVM_MEM_TYPE_RESERVED) { 208*8d987725SAlexandru Elisei pr_err("Cannot delete reserved region [%llx-%llx]", 209*8d987725SAlexandru Elisei guest_phys, guest_phys + size - 1); 210*8d987725SAlexandru Elisei ret = -EINVAL; 211*8d987725SAlexandru Elisei goto out; 212*8d987725SAlexandru Elisei } 213*8d987725SAlexandru Elisei 214*8d987725SAlexandru Elisei mem = (struct kvm_userspace_memory_region) { 215*8d987725SAlexandru Elisei .slot = bank->slot, 216*8d987725SAlexandru Elisei .guest_phys_addr = guest_phys, 217*8d987725SAlexandru Elisei .memory_size = 0, 218*8d987725SAlexandru Elisei .userspace_addr = (unsigned long)userspace_addr, 219*8d987725SAlexandru Elisei }; 220*8d987725SAlexandru Elisei 221*8d987725SAlexandru Elisei ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem); 222*8d987725SAlexandru Elisei if (ret < 0) { 223*8d987725SAlexandru Elisei ret = -errno; 224*8d987725SAlexandru Elisei goto out; 225*8d987725SAlexandru Elisei } 226*8d987725SAlexandru Elisei 227*8d987725SAlexandru Elisei list_del(&bank->list); 228*8d987725SAlexandru Elisei free(bank); 229*8d987725SAlexandru Elisei kvm->mem_slots--; 230*8d987725SAlexandru Elisei ret = 0; 231*8d987725SAlexandru Elisei 232*8d987725SAlexandru Elisei out: 233*8d987725SAlexandru Elisei mutex_unlock(&kvm->mem_banks_lock); 234*8d987725SAlexandru Elisei return ret; 235*8d987725SAlexandru Elisei } 236*8d987725SAlexandru Elisei 2378f46c736SJean-Philippe Brucker int kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, 2388f46c736SJean-Philippe Brucker void *userspace_addr, enum kvm_mem_type type) 2394076b041SPekka Enberg { 2402b0e3342SPekka Enberg struct kvm_userspace_memory_region mem; 241fa1076abSJean-Philippe Brucker struct kvm_mem_bank *merged = NULL; 242d82350d3SWill Deacon struct kvm_mem_bank *bank; 243*8d987725SAlexandru Elisei struct list_head *prev_entry; 244*8d987725SAlexandru Elisei u32 slot; 245839051d9SSasha Levin int ret; 246839051d9SSasha Levin 247*8d987725SAlexandru Elisei mutex_lock(&kvm->mem_banks_lock); 248*8d987725SAlexandru Elisei /* Check for overlap and find first empty slot. */ 249*8d987725SAlexandru Elisei slot = 0; 250*8d987725SAlexandru Elisei prev_entry = &kvm->mem_banks; 251fa1076abSJean-Philippe Brucker list_for_each_entry(bank, &kvm->mem_banks, list) { 252fa1076abSJean-Philippe Brucker u64 bank_end = bank->guest_phys_addr + bank->size - 1; 253fa1076abSJean-Philippe Brucker u64 end = guest_phys + size - 1; 254*8d987725SAlexandru Elisei if (guest_phys > bank_end || end < bank->guest_phys_addr) { 255*8d987725SAlexandru Elisei /* 256*8d987725SAlexandru Elisei * Keep the banks sorted ascending by slot, so it's 257*8d987725SAlexandru Elisei * easier for us to find a free slot. 258*8d987725SAlexandru Elisei */ 259*8d987725SAlexandru Elisei if (bank->slot == slot) { 260*8d987725SAlexandru Elisei slot++; 261*8d987725SAlexandru Elisei prev_entry = &bank->list; 262*8d987725SAlexandru Elisei } 263fa1076abSJean-Philippe Brucker continue; 264*8d987725SAlexandru Elisei } 265fa1076abSJean-Philippe Brucker 266fa1076abSJean-Philippe Brucker /* Merge overlapping reserved regions */ 267fa1076abSJean-Philippe Brucker if (bank->type == KVM_MEM_TYPE_RESERVED && 268fa1076abSJean-Philippe Brucker type == KVM_MEM_TYPE_RESERVED) { 269fa1076abSJean-Philippe Brucker bank->guest_phys_addr = min(bank->guest_phys_addr, guest_phys); 270fa1076abSJean-Philippe Brucker bank->size = max(bank_end, end) - bank->guest_phys_addr + 1; 271fa1076abSJean-Philippe Brucker 272fa1076abSJean-Philippe Brucker if (merged) { 273fa1076abSJean-Philippe Brucker /* 274fa1076abSJean-Philippe Brucker * This is at least the second merge, remove 275fa1076abSJean-Philippe Brucker * previous result. 276fa1076abSJean-Philippe Brucker */ 277fa1076abSJean-Philippe Brucker list_del(&merged->list); 278fa1076abSJean-Philippe Brucker free(merged); 279fa1076abSJean-Philippe Brucker } 280fa1076abSJean-Philippe Brucker 281fa1076abSJean-Philippe Brucker guest_phys = bank->guest_phys_addr; 282fa1076abSJean-Philippe Brucker size = bank->size; 283fa1076abSJean-Philippe Brucker merged = bank; 284fa1076abSJean-Philippe Brucker 285fa1076abSJean-Philippe Brucker /* Keep checking that we don't overlap another region */ 286fa1076abSJean-Philippe Brucker continue; 287fa1076abSJean-Philippe Brucker } 288fa1076abSJean-Philippe Brucker 289fa1076abSJean-Philippe Brucker pr_err("%s region [%llx-%llx] would overlap %s region [%llx-%llx]", 290fa1076abSJean-Philippe Brucker kvm_mem_type_to_string(type), guest_phys, guest_phys + size - 1, 291fa1076abSJean-Philippe Brucker kvm_mem_type_to_string(bank->type), bank->guest_phys_addr, 292fa1076abSJean-Philippe Brucker bank->guest_phys_addr + bank->size - 1); 293fa1076abSJean-Philippe Brucker 294*8d987725SAlexandru Elisei ret = -EINVAL; 295*8d987725SAlexandru Elisei goto out; 296fa1076abSJean-Philippe Brucker } 297fa1076abSJean-Philippe Brucker 298*8d987725SAlexandru Elisei if (merged) { 299*8d987725SAlexandru Elisei ret = 0; 300*8d987725SAlexandru Elisei goto out; 301*8d987725SAlexandru Elisei } 302fa1076abSJean-Philippe Brucker 303d82350d3SWill Deacon bank = malloc(sizeof(*bank)); 304*8d987725SAlexandru Elisei if (!bank) { 305*8d987725SAlexandru Elisei ret = -ENOMEM; 306*8d987725SAlexandru Elisei goto out; 307*8d987725SAlexandru Elisei } 308d82350d3SWill Deacon 309d82350d3SWill Deacon INIT_LIST_HEAD(&bank->list); 310d82350d3SWill Deacon bank->guest_phys_addr = guest_phys; 311d82350d3SWill Deacon bank->host_addr = userspace_addr; 312d82350d3SWill Deacon bank->size = size; 3138f46c736SJean-Philippe Brucker bank->type = type; 314*8d987725SAlexandru Elisei bank->slot = slot; 315d82350d3SWill Deacon 316fa1076abSJean-Philippe Brucker if (type != KVM_MEM_TYPE_RESERVED) { 317839051d9SSasha Levin mem = (struct kvm_userspace_memory_region) { 318*8d987725SAlexandru Elisei .slot = slot, 319874467f8SSasha Levin .guest_phys_addr = guest_phys, 320874467f8SSasha Levin .memory_size = size, 321c4acb611SIngo Molnar .userspace_addr = (unsigned long)userspace_addr, 322839051d9SSasha Levin }; 323839051d9SSasha Levin 324874467f8SSasha Levin ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem); 325*8d987725SAlexandru Elisei if (ret < 0) { 326*8d987725SAlexandru Elisei ret = -errno; 327*8d987725SAlexandru Elisei goto out; 328*8d987725SAlexandru Elisei } 329fa1076abSJean-Philippe Brucker } 330495fbd4eSSasha Levin 331*8d987725SAlexandru Elisei list_add(&bank->list, prev_entry); 332*8d987725SAlexandru Elisei kvm->mem_slots++; 333*8d987725SAlexandru Elisei ret = 0; 334fa1076abSJean-Philippe Brucker 335*8d987725SAlexandru Elisei out: 336*8d987725SAlexandru Elisei mutex_unlock(&kvm->mem_banks_lock); 337*8d987725SAlexandru Elisei return ret; 338839051d9SSasha Levin } 339839051d9SSasha Levin 340f412251fSWill Deacon void *guest_flat_to_host(struct kvm *kvm, u64 offset) 341f412251fSWill Deacon { 342f412251fSWill Deacon struct kvm_mem_bank *bank; 343f412251fSWill Deacon 344f412251fSWill Deacon list_for_each_entry(bank, &kvm->mem_banks, list) { 345f412251fSWill Deacon u64 bank_start = bank->guest_phys_addr; 346f412251fSWill Deacon u64 bank_end = bank_start + bank->size; 347f412251fSWill Deacon 348f412251fSWill Deacon if (offset >= bank_start && offset < bank_end) 349f412251fSWill Deacon return bank->host_addr + (offset - bank_start); 350f412251fSWill Deacon } 351f412251fSWill Deacon 352f412251fSWill Deacon pr_warning("unable to translate guest address 0x%llx to host", 353f412251fSWill Deacon (unsigned long long)offset); 354f412251fSWill Deacon return NULL; 355f412251fSWill Deacon } 356f412251fSWill Deacon 3570cb41990SWill Deacon u64 host_to_guest_flat(struct kvm *kvm, void *ptr) 3580cb41990SWill Deacon { 3590cb41990SWill Deacon struct kvm_mem_bank *bank; 3600cb41990SWill Deacon 3610cb41990SWill Deacon list_for_each_entry(bank, &kvm->mem_banks, list) { 3620cb41990SWill Deacon void *bank_start = bank->host_addr; 3630cb41990SWill Deacon void *bank_end = bank_start + bank->size; 3640cb41990SWill Deacon 3650cb41990SWill Deacon if (ptr >= bank_start && ptr < bank_end) 3660cb41990SWill Deacon return bank->guest_phys_addr + (ptr - bank_start); 3670cb41990SWill Deacon } 3680cb41990SWill Deacon 3690cb41990SWill Deacon pr_warning("unable to translate host address %p to guest", ptr); 3700cb41990SWill Deacon return 0; 3710cb41990SWill Deacon } 3720cb41990SWill Deacon 3738f46c736SJean-Philippe Brucker /* 3748f46c736SJean-Philippe Brucker * Iterate over each registered memory bank. Call @fun for each bank with @data 3758f46c736SJean-Philippe Brucker * as argument. @type is a bitmask that allows to filter banks according to 3768f46c736SJean-Philippe Brucker * their type. 3778f46c736SJean-Philippe Brucker * 3788f46c736SJean-Philippe Brucker * If one call to @fun returns a non-zero value, stop iterating and return the 3798f46c736SJean-Philippe Brucker * value. Otherwise, return zero. 3808f46c736SJean-Philippe Brucker */ 3818f46c736SJean-Philippe Brucker int kvm__for_each_mem_bank(struct kvm *kvm, enum kvm_mem_type type, 3828f46c736SJean-Philippe Brucker int (*fun)(struct kvm *kvm, struct kvm_mem_bank *bank, void *data), 3838f46c736SJean-Philippe Brucker void *data) 3848f46c736SJean-Philippe Brucker { 3858f46c736SJean-Philippe Brucker int ret; 3868f46c736SJean-Philippe Brucker struct kvm_mem_bank *bank; 3878f46c736SJean-Philippe Brucker 3888f46c736SJean-Philippe Brucker list_for_each_entry(bank, &kvm->mem_banks, list) { 3898f46c736SJean-Philippe Brucker if (type != KVM_MEM_TYPE_ALL && !(bank->type & type)) 3908f46c736SJean-Philippe Brucker continue; 3918f46c736SJean-Philippe Brucker 3928f46c736SJean-Philippe Brucker ret = fun(kvm, bank, data); 3938f46c736SJean-Philippe Brucker if (ret) 3948f46c736SJean-Philippe Brucker break; 3958f46c736SJean-Philippe Brucker } 3968f46c736SJean-Philippe Brucker 3978f46c736SJean-Philippe Brucker return ret; 3988f46c736SJean-Philippe Brucker } 3998f46c736SJean-Philippe Brucker 4008259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm) 401384922b3SPekka Enberg { 402384922b3SPekka Enberg int ret; 403384922b3SPekka Enberg 40443835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS); 4058259b8ccSSasha Levin if (ret <= 0) 4063b9b691dSMatt Evans /* 4073b9b691dSMatt Evans * api.txt states that if KVM_CAP_NR_VCPUS does not exist, 4083b9b691dSMatt Evans * assume 4. 4093b9b691dSMatt Evans */ 4103b9b691dSMatt Evans return 4; 411384922b3SPekka Enberg 412384922b3SPekka Enberg return ret; 413384922b3SPekka Enberg } 414384922b3SPekka Enberg 4158259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm) 4168259b8ccSSasha Levin { 4178259b8ccSSasha Levin int ret; 4188259b8ccSSasha Levin 4198259b8ccSSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS); 4208259b8ccSSasha Levin if (ret <= 0) 4218259b8ccSSasha Levin ret = kvm__recommended_cpus(kvm); 4228259b8ccSSasha Levin 4238259b8ccSSasha Levin return ret; 4248259b8ccSSasha Levin } 4258259b8ccSSasha Levin 42647621338SSasha Levin int kvm__init(struct kvm *kvm) 427839051d9SSasha Levin { 4284076b041SPekka Enberg int ret; 4294076b041SPekka Enberg 430495fbd4eSSasha Levin if (!kvm__arch_cpu_supports_vm()) { 431495fbd4eSSasha Levin pr_err("Your CPU does not support hardware virtualization"); 4326fce7105SYang Bai ret = -ENOSYS; 4336fce7105SYang Bai goto err; 434495fbd4eSSasha Levin } 435c78b8713SAsias He 43647621338SSasha Levin kvm->sys_fd = open(kvm->cfg.dev, O_RDWR); 43743835ac9SSasha Levin if (kvm->sys_fd < 0) { 438d648dbf5SCyrill Gorcunov if (errno == ENOENT) 439495fbd4eSSasha Levin pr_err("'%s' not found. Please make sure your kernel has CONFIG_KVM " 44047621338SSasha Levin "enabled and that the KVM modules are loaded.", kvm->cfg.dev); 441d648dbf5SCyrill Gorcunov else if (errno == ENODEV) 442d648dbf5SCyrill Gorcunov pr_err("'%s' KVM driver not available.\n # (If the KVM " 443495fbd4eSSasha Levin "module is loaded then 'dmesg' may offer further clues " 44447621338SSasha Levin "about the failure.)", kvm->cfg.dev); 445d648dbf5SCyrill Gorcunov else 44647621338SSasha Levin pr_err("Could not open %s: ", kvm->cfg.dev); 447d648dbf5SCyrill Gorcunov 448495fbd4eSSasha Levin ret = -errno; 449d648dbf5SCyrill Gorcunov goto err_free; 4506d7c36ceSPekka Enberg } 451b8f6afcdSPekka Enberg 45243835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0); 453495fbd4eSSasha Levin if (ret != KVM_API_VERSION) { 454495fbd4eSSasha Levin pr_err("KVM_API_VERSION ioctl"); 455495fbd4eSSasha Levin ret = -errno; 456d648dbf5SCyrill Gorcunov goto err_sys_fd; 457495fbd4eSSasha Levin } 4586c7d8514SPekka Enberg 459b5a5cd67SAndreas Herrmann kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, KVM_VM_TYPE); 460495fbd4eSSasha Levin if (kvm->vm_fd < 0) { 46181404cdbSDavid Daney pr_err("KVM_CREATE_VM ioctl"); 462495fbd4eSSasha Levin ret = kvm->vm_fd; 463d648dbf5SCyrill Gorcunov goto err_sys_fd; 464495fbd4eSSasha Levin } 46528fa19c0SPekka Enberg 466495fbd4eSSasha Levin if (kvm__check_extensions(kvm)) { 4675f5b0144SWill Deacon pr_err("A required KVM extension is not supported by OS"); 468495fbd4eSSasha Levin ret = -ENOSYS; 4696fce7105SYang Bai goto err_vm_fd; 470495fbd4eSSasha Levin } 4719687927dSAsias He 47247621338SSasha Levin kvm__arch_init(kvm, kvm->cfg.hugetlbfs_path, kvm->cfg.ram_size); 4739687927dSAsias He 474d82350d3SWill Deacon INIT_LIST_HEAD(&kvm->mem_banks); 475abee258bSSasha Levin kvm__init_ram(kvm); 476abee258bSSasha Levin 477084a1356SSasha Levin if (!kvm->cfg.firmware_filename) { 478084a1356SSasha Levin if (!kvm__load_kernel(kvm, kvm->cfg.kernel_filename, 479ff7ba6faSWill Deacon kvm->cfg.initrd_filename, kvm->cfg.real_cmdline)) 480084a1356SSasha Levin die("unable to load kernel %s", kvm->cfg.kernel_filename); 481084a1356SSasha Levin } 482084a1356SSasha Levin 483084a1356SSasha Levin if (kvm->cfg.firmware_filename) { 484084a1356SSasha Levin if (!kvm__load_firmware(kvm, kvm->cfg.firmware_filename)) 485084a1356SSasha Levin die("unable to load firmware image %s: %s", kvm->cfg.firmware_filename, strerror(errno)); 486084a1356SSasha Levin } else { 487084a1356SSasha Levin ret = kvm__arch_setup_firmware(kvm); 488084a1356SSasha Levin if (ret < 0) 489084a1356SSasha Levin die("kvm__arch_setup_firmware() failed with error %d\n", ret); 490084a1356SSasha Levin } 491084a1356SSasha Levin 49247621338SSasha Levin return 0; 493d648dbf5SCyrill Gorcunov 4946fce7105SYang Bai err_vm_fd: 495495fbd4eSSasha Levin close(kvm->vm_fd); 496d648dbf5SCyrill Gorcunov err_sys_fd: 497495fbd4eSSasha Levin close(kvm->sys_fd); 498d648dbf5SCyrill Gorcunov err_free: 499495fbd4eSSasha Levin free(kvm); 5006fce7105SYang Bai err: 50147621338SSasha Levin return ret; 5024076b041SPekka Enberg } 50349a8afd1SSasha Levin core_init(kvm__init); 5044076b041SPekka Enberg 50572811558SPekka Enberg /* RFC 1952 */ 50672811558SPekka Enberg #define GZIP_ID1 0x1f 50772811558SPekka Enberg #define GZIP_ID2 0x8b 508663ce1dfSMatt Evans #define CPIO_MAGIC "0707" 509663ce1dfSMatt Evans /* initrd may be gzipped, or a plain cpio */ 51072811558SPekka Enberg static bool initrd_check(int fd) 51172811558SPekka Enberg { 512663ce1dfSMatt Evans unsigned char id[4]; 51372811558SPekka Enberg 51472811558SPekka Enberg if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0) 51572811558SPekka Enberg return false; 51672811558SPekka Enberg 51772811558SPekka Enberg if (lseek(fd, 0, SEEK_SET) < 0) 51872811558SPekka Enberg die_perror("lseek"); 51972811558SPekka Enberg 520663ce1dfSMatt Evans return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) || 521663ce1dfSMatt Evans !memcmp(id, CPIO_MAGIC, 4); 52272811558SPekka Enberg } 52372811558SPekka Enberg 5246d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename, 525ff7ba6faSWill Deacon const char *initrd_filename, const char *kernel_cmdline) 526ae1fae34SPekka Enberg { 5277fb218bdSPekka Enberg bool ret; 5282065a6f7SCyrill Gorcunov int fd_kernel = -1, fd_initrd = -1; 529ae1fae34SPekka Enberg 5302065a6f7SCyrill Gorcunov fd_kernel = open(kernel_filename, O_RDONLY); 5312065a6f7SCyrill Gorcunov if (fd_kernel < 0) 5320b62d2bbSPekka Enberg die("Unable to open kernel %s", kernel_filename); 533ae1fae34SPekka Enberg 5342065a6f7SCyrill Gorcunov if (initrd_filename) { 5352065a6f7SCyrill Gorcunov fd_initrd = open(initrd_filename, O_RDONLY); 5362065a6f7SCyrill Gorcunov if (fd_initrd < 0) 5370b62d2bbSPekka Enberg die("Unable to open initrd %s", initrd_filename); 53872811558SPekka Enberg 53972811558SPekka Enberg if (!initrd_check(fd_initrd)) 54072811558SPekka Enberg die("%s is not an initrd", initrd_filename); 5412065a6f7SCyrill Gorcunov } 5422065a6f7SCyrill Gorcunov 543004f7684SAndre Przywara ret = kvm__arch_load_kernel_image(kvm, fd_kernel, fd_initrd, 544004f7684SAndre Przywara kernel_cmdline); 545009b0758SPekka Enberg 546604dbd63SMatt Evans if (initrd_filename) 547604dbd63SMatt Evans close(fd_initrd); 5485a6ac675SSasha Levin close(fd_kernel); 5495a6ac675SSasha Levin 550004f7684SAndre Przywara if (!ret) 551004f7684SAndre Przywara die("%s is not a valid kernel image", kernel_filename); 552ae1fae34SPekka Enberg return ret; 553ae1fae34SPekka Enberg } 554ae1fae34SPekka Enberg 555b2cf1e9fSAsias He void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size, int debug_fd) 556090f898eSCyrill Gorcunov { 557090f898eSCyrill Gorcunov unsigned char *p; 558090f898eSCyrill Gorcunov unsigned long n; 559090f898eSCyrill Gorcunov 560090f898eSCyrill Gorcunov size &= ~7; /* mod 8 */ 561090f898eSCyrill Gorcunov if (!size) 562090f898eSCyrill Gorcunov return; 563090f898eSCyrill Gorcunov 56443835ac9SSasha Levin p = guest_flat_to_host(kvm, addr); 565090f898eSCyrill Gorcunov 56648cf3877SPekka Enberg for (n = 0; n < size; n += 8) { 567b2cf1e9fSAsias He if (!host_ptr_in_ram(kvm, p + n)) { 568b2cf1e9fSAsias He dprintf(debug_fd, " 0x%08lx: <unknown>\n", addr + n); 569b2cf1e9fSAsias He continue; 570b2cf1e9fSAsias He } 571b2cf1e9fSAsias He dprintf(debug_fd, " 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n", 572090f898eSCyrill Gorcunov addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3], 573090f898eSCyrill Gorcunov p[n + 4], p[n + 5], p[n + 6], p[n + 7]); 574090f898eSCyrill Gorcunov } 57548cf3877SPekka Enberg } 5764298ddadSSasha Levin 5772aa76b26SWill Deacon void kvm__reboot(struct kvm *kvm) 5782aa76b26SWill Deacon { 5792aa76b26SWill Deacon /* Check if the guest is running */ 5802aa76b26SWill Deacon if (!kvm->cpus[0] || kvm->cpus[0]->thread == 0) 5812aa76b26SWill Deacon return; 5822aa76b26SWill Deacon 583e8cb90fbSWill Deacon pthread_kill(kvm->cpus[0]->thread, SIGKVMEXIT); 5842aa76b26SWill Deacon } 5852aa76b26SWill Deacon 586e8cb90fbSWill Deacon void kvm__continue(struct kvm *kvm) 587e8cb90fbSWill Deacon { 5882aa76b26SWill Deacon mutex_unlock(&pause_lock); 5892aa76b26SWill Deacon } 5902aa76b26SWill Deacon 5914346fd8fSSasha Levin void kvm__pause(struct kvm *kvm) 5924298ddadSSasha Levin { 5934298ddadSSasha Levin int i, paused_vcpus = 0; 5944298ddadSSasha Levin 595e8cb90fbSWill Deacon mutex_lock(&pause_lock); 596e8cb90fbSWill Deacon 5974298ddadSSasha Levin /* Check if the guest is running */ 59837b8e06bSJean-Philippe Brucker if (!kvm->cpus || !kvm->cpus[0] || kvm->cpus[0]->thread == 0) 5994298ddadSSasha Levin return; 6004298ddadSSasha Levin 6014298ddadSSasha Levin pause_event = eventfd(0, 0); 6024298ddadSSasha Levin if (pause_event < 0) 6034298ddadSSasha Levin die("Failed creating pause notification event"); 6042aa76b26SWill Deacon for (i = 0; i < kvm->nrcpus; i++) { 60529f4ec31SJulien Thierry if (kvm->cpus[i]->is_running && kvm->cpus[i]->paused == 0) 606df4239fbSSasha Levin pthread_kill(kvm->cpus[i]->thread, SIGKVMPAUSE); 6072aa76b26SWill Deacon else 6082aa76b26SWill Deacon paused_vcpus++; 6092aa76b26SWill Deacon } 6104298ddadSSasha Levin 6114298ddadSSasha Levin while (paused_vcpus < kvm->nrcpus) { 6124298ddadSSasha Levin u64 cur_read; 6134298ddadSSasha Levin 6144298ddadSSasha Levin if (read(pause_event, &cur_read, sizeof(cur_read)) < 0) 6154298ddadSSasha Levin die("Failed reading pause event"); 6164298ddadSSasha Levin paused_vcpus += cur_read; 6174298ddadSSasha Levin } 6184298ddadSSasha Levin close(pause_event); 6194298ddadSSasha Levin } 6204298ddadSSasha Levin 6214298ddadSSasha Levin void kvm__notify_paused(void) 6224298ddadSSasha Levin { 6234298ddadSSasha Levin u64 p = 1; 6244298ddadSSasha Levin 6254298ddadSSasha Levin if (write(pause_event, &p, sizeof(p)) < 0) 6264298ddadSSasha Levin die("Failed notifying of paused VCPU."); 6274298ddadSSasha Levin 6284298ddadSSasha Levin mutex_lock(&pause_lock); 62929f4ec31SJulien Thierry current_kvm_cpu->paused = 0; 6304298ddadSSasha Levin mutex_unlock(&pause_lock); 6314298ddadSSasha Levin } 632