#include "kvm/kvm.h"

#include "kvm/boot-protocol.h"
#include "kvm/cpufeature.h"
#include "kvm/read-write.h"
#include "kvm/interrupt.h"
#include "kvm/mptable.h"
#include "kvm/util.h"
#include "kvm/mutex.h"
#include "kvm/kvm-cpu.h"
#include "kvm/kvm-ipc.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <dirent.h>

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};

#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

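/*
 * KVM capabilities that the tool requires from the host kernel. kvm__init()
 * refuses to start a guest if any of them is missing (see
 * kvm__check_extensions() below).
 */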
struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};

extern struct kvm *kvm;
extern struct kvm_cpu *kvm_cpus[KVM_NR_CPUS];
static int pause_event;
static DEFINE_MUTEX(pause_lock);

static char kvm_dir[PATH_MAX];

static void set_dir(const char *fmt, va_list args)
{
	char tmp[PATH_MAX];

	vsnprintf(tmp, sizeof(tmp), fmt, args);

	mkdir(tmp, 0777);

	if (!realpath(tmp, kvm_dir))
		die("Unable to set KVM tool directory");

	strcat(kvm_dir, "/");
}

void kvm__set_dir(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	set_dir(fmt, args);
	va_end(args);
}

const char *kvm__get_dir(void)
{
	return kvm_dir;
}

static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
	if (ret < 0)
		return false;

	return ret;
}

static int kvm__check_extensions(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
		if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
			pr_error("Unsupported KVM extension detected: %s",
				kvm_req_ext[i].name);
			return -1;
		}
	}

	return 0;
}

static struct kvm *kvm__new(void)
{
	struct kvm *kvm = calloc(1, sizeof *kvm);

	if (!kvm)
		die("out of memory");

	return kvm;
}

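/*
 * Create the per-instance IPC socket ("<kvm dir>/<name>.sock"), bind it and
 * start listening on it. Returns the listening socket fd, or -1 on failure.
 */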
static int kvm__create_socket(struct kvm *kvm)
{
	char full_name[PATH_MAX];
	int s;
	struct sockaddr_un local;
	int len, r;

	if (!kvm->name)
		return -1;

	sprintf(full_name, "%s", kvm__get_dir());
	mkdir(full_name, 0777);
	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), kvm->name);
	if (access(full_name, F_OK) == 0)
		die("Socket file %s already exists", full_name);

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0)
		return s;
	local.sun_family = AF_UNIX;
	strcpy(local.sun_path, full_name);
	unlink(local.sun_path);
	len = strlen(local.sun_path) + sizeof(local.sun_family);
	r = bind(s, (struct sockaddr *)&local, len);
	if (r < 0)
		goto fail;

	r = listen(s, 5);
	if (r < 0)
		goto fail;

	return s;

fail:
	close(s);
	return -1;
}

void kvm__remove_socket(const char *name)
{
	char full_name[PATH_MAX];

	sprintf(full_name, "%s/%s.sock", kvm__get_dir(), name);
	unlink(full_name);
}

int kvm__get_sock_by_instance(const char *name)
{
	int s, len, r;
	char sock_file[PATH_MAX];
	struct sockaddr_un local;

	sprintf(sock_file, "%s/%s.sock", kvm__get_dir(), name);
	s = socket(AF_UNIX, SOCK_STREAM, 0);

	local.sun_family = AF_UNIX;
	strcpy(local.sun_path, sock_file);
	len = strlen(local.sun_path) + sizeof(local.sun_family);

	r = connect(s, (struct sockaddr *)&local, len);
	if (r < 0 && errno == ECONNREFUSED) {
		/* Clean up a ghost socket file */
		unlink(sock_file);
		return -1;
	} else if (r < 0) {
		die("Failed connecting to instance");
	}

	return s;
}

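/*
 * Walk the KVM tool directory and invoke the callback for every instance
 * socket found in it. Enumeration stops early if the callback returns a
 * negative value, which is then passed back to the caller.
 */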
int kvm__enumerate_instances(int (*callback)(const char *name, int fd))
{
	char full_name[PATH_MAX];
	int sock;
	DIR *dir;
	struct dirent entry, *result;
	int ret = 0;

	sprintf(full_name, "%s", kvm__get_dir());
	dir = opendir(full_name);
	if (!dir)
		return -errno;

	for (;;) {
		readdir_r(dir, &entry, &result);
		if (result == NULL)
			break;
		if (entry.d_type == DT_SOCK) {
			entry.d_name[strlen(entry.d_name) - 5] = 0;
			sock = kvm__get_sock_by_instance(entry.d_name);
			if (sock < 0)
				continue;
			ret = callback(entry.d_name, sock);
			close(sock);
			if (ret < 0)
				break;
		}
	}

	closedir(dir);

	return ret;
}

void kvm__delete(struct kvm *kvm)
{
	kvm__stop_timer(kvm);

	munmap(kvm->ram_start, kvm->ram_size);
	kvm_ipc__stop();
	kvm__remove_socket(kvm->name);
	free(kvm);
}

static bool kvm__cpu_supports_vm(void)
{
	struct cpuid_regs regs;
	u32 eax_base;
	int feature;

	regs = (struct cpuid_regs) {
		.eax = 0x00,
	};
	host_cpuid(&regs);

	switch (regs.ebx) {
	case CPUID_VENDOR_INTEL_1:
		eax_base = 0x00;
		feature  = KVM__X86_FEATURE_VMX;
		break;

	case CPUID_VENDOR_AMD_1:
		eax_base = 0x80000000;
		feature  = KVM__X86_FEATURE_SVM;
		break;

	default:
		return false;
	}

	regs = (struct cpuid_regs) {
		.eax = eax_base,
	};
	host_cpuid(&regs);

	if (regs.eax < eax_base + 0x01)
		return false;

	regs = (struct cpuid_regs) {
		.eax = eax_base + 0x01
	};
	host_cpuid(&regs);

	return regs.ecx & (1 << feature);
}

/*
 * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
 * memory regions to it. Therefore, be careful if you use this function for
 * registering memory regions for emulating hardware.
 */
void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem;
	int ret;

	mem = (struct kvm_userspace_memory_region) {
		.slot			= kvm->mem_slots++,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}

/*
 * Allocating more than 4GB of RAM requires us to leave a gap in the RAM
 * which is used for PCI MMIO, hotplug, and unconfigured devices (see
 * documentation of e820_setup_gap() for details).
 *
 * If we're required to initialize more than 4GB of RAM, we will create
 * a gap between 0xe0000000 and 0x100000000 in the guest physical address
 * space.
 */
void kvm__init_ram(struct kvm *kvm)
{
	u64 phys_start, phys_size;
	void *host_mem;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		/* Use a single block of RAM for 32bit RAM */

		phys_start = 0;
		phys_size  = kvm->ram_size;
		host_mem   = kvm->ram_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
	} else {
		/* First RAM range from zero to the PCI gap: */

		phys_start = 0;
		phys_size  = KVM_32BIT_GAP_START;
		host_mem   = kvm->ram_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);

		/* Second RAM range from 4GB to the end of RAM: */

		phys_start = 0x100000000ULL;
		phys_size  = kvm->ram_size - phys_size;
		host_mem   = kvm->ram_start + phys_start;

		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
	}
}

int kvm__recommended_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret <= 0)
		die_perror("KVM_CAP_NR_VCPUS");

	return ret;
}

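/* kvm_ipc handler: answer a KVM_IPC_PID request with the PID of this process. */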
static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
{
	pid_t pid = getpid();
	int r = 0;

	if (type == KVM_IPC_PID)
		r = write(fd, &pid, sizeof(pid));

	if (r < 0)
		pr_warning("Failed sending PID");
}

/*
 * The following hack should be removed once 'x86: Raise the hard
 * VCPU count limit' makes its way into the mainline.
 */
#ifndef KVM_CAP_MAX_VCPUS
#define KVM_CAP_MAX_VCPUS 66
#endif

int kvm__max_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	if (ret <= 0)
		ret = kvm__recommended_cpus(kvm);

	return ret;
}

struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
{
	struct kvm_pit_config pit_config = { .flags = 0, };
	struct kvm *kvm;
	int ret;

	if (!kvm__cpu_supports_vm())
		die("Your CPU does not support hardware virtualization");

	kvm = kvm__new();

	kvm->sys_fd = open(kvm_dev, O_RDWR);
	if (kvm->sys_fd < 0) {
		if (errno == ENOENT)
			die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
		if (errno == ENODEV)
			die("'%s' KVM driver not available.\n # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);

		fprintf(stderr, " Fatal, could not open %s: ", kvm_dev);
		perror(NULL);
		exit(1);
	}

	ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
	if (kvm->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(kvm))
		die("A required KVM extension is not supported by the host kernel");

	ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
	if (ret < 0)
		die_perror("KVM_SET_TSS_ADDR ioctl");

	ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
	if (ret < 0)
		die_perror("KVM_CREATE_PIT2 ioctl");

	kvm->ram_size = ram_size;

	if (kvm->ram_size < KVM_32BIT_GAP_START) {
		kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
	} else {
		kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
		if (kvm->ram_start != MAP_FAILED) {
			/*
			 * We mprotect the gap (see kvm__init_ram() for details)
			 * PROT_NONE so that if we accidentally write to it, we
			 * will know.
			 */
			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
		}
	}
	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");

	kvm->name = name;

	kvm_ipc__start(kvm__create_socket(kvm));
	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);

	return kvm;
}

#define BOOT_LOADER_SELECTOR	0x1000
#define BOOT_LOADER_IP		0x0000
#define BOOT_LOADER_SP		0x8000
#define BOOT_CMDLINE_OFFSET	0x20000

#define BOOT_PROTOCOL_REQUIRED	0x206
#define LOAD_HIGH		0x01

static bool load_flat_binary(struct kvm *kvm, int fd)
{
	void *p;
	int nr;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	while ((nr = read(fd, p, 65536)) > 0)
		p += nr;

	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
	kvm->boot_ip		= BOOT_LOADER_IP;
	kvm->boot_sp		= BOOT_LOADER_SP;

	return true;
}

static const char *BZIMAGE_MAGIC = "HdrS";

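/*
 * Load a bzImage: the real-mode setup code is copied to
 * BOOT_LOADER_SELECTOR:BOOT_LOADER_IP, the protected-mode kernel to
 * BZ_KERNEL_START, the command line to BOOT_CMDLINE_OFFSET, and the optional
 * initrd as high in RAM as possible below initrd_addr_max.
 */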
static bool load_bzimage(struct kvm *kvm, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline, u16 vidmode)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	/*
	 * See Documentation/x86/boot.txt for details on the bzImage on-disk
	 * and memory layout.
	 */

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
		return false;

	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
		return false;

	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
		die("Kernel is too old");

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	setup_sects = boot.hdr.setup_sects + 1;

	setup_size = setup_sects << 9;
	p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to mem */
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START */
	p = guest_flat_to_host(kvm, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;
	kern_boot->hdr.vid_mode		= vidmode;

	/*
	 * Read the initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (kvm->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

		p = guest_flat_to_host(kvm, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	kvm->boot_selector = BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	kvm->boot_ip = BOOT_LOADER_IP + 0x200;
	kvm->boot_sp = BOOT_LOADER_SP;

	return true;
}

/* RFC 1952 */
#define GZIP_ID1	0x1f
#define GZIP_ID2	0x8b

static bool initrd_check(int fd)
{
	unsigned char id[2];

	if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0)
		return false;

	if (lseek(fd, 0, SEEK_SET) < 0)
		die_perror("lseek");

	return id[0] == GZIP_ID1 && id[1] == GZIP_ID2;
}

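/*
 * Load the guest kernel image: try to parse it as a bzImage first and fall
 * back to loading it as a flat binary at BOOT_LOADER_SELECTOR:BOOT_LOADER_IP.
 */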
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline, u16 vidmode)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);

		if (!initrd_check(fd_initrd))
			die("%s is not an initrd", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode);

	if (initrd_filename)
		close(fd_initrd);

	if (ret)
		goto found_kernel;

	pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	close(fd_kernel);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd_kernel);

	return ret;
}

/**
 * kvm__setup_bios - inject BIOS into guest system memory
 * @kvm: guest system descriptor
 *
 * This is the main routine where we poke guest memory and install the
 * BIOS there.
 */
void kvm__setup_bios(struct kvm *kvm)
{
	/* standard minimal configuration */
	setup_bios(kvm);

	/* FIXME: SMP, ACPI and friends here */

	/* MP table */
	mptable_setup(kvm, kvm->nrcpus);
}

#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * This function sets up a timer that's used to inject interrupts from the
 * userspace hypervisor into the guest at periodic intervals. Please note
 * that the clock interrupt, for example, is not handled here.
 */
void kvm__start_timer(struct kvm *kvm)
{
	struct itimerspec its;
	struct sigevent sev;

	memset(&sev, 0, sizeof(struct sigevent));
	sev.sigev_value.sival_int	= 0;
	sev.sigev_notify		= SIGEV_THREAD_ID;
	sev.sigev_signo			= SIGALRM;
	sev._sigev_un._tid		= syscall(__NR_gettid);

	if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
		die("timer_create()");

	its.it_value.tv_sec	= TIMER_INTERVAL_NS / 1000000000;
	its.it_value.tv_nsec	= TIMER_INTERVAL_NS % 1000000000;
	its.it_interval.tv_sec	= its.it_value.tv_sec;
	its.it_interval.tv_nsec	= its.it_value.tv_nsec;

	if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
		die("timer_settime()");
}

void kvm__stop_timer(struct kvm *kvm)
{
	if (kvm->timerid)
		if (timer_delete(kvm->timerid) < 0)
			die("timer_delete()");

	kvm->timerid = 0;
}

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;

	irq_level = (struct kvm_irq_level) {
		{
			.irq = irq,
		},
		.level = level,
	};

	if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
		die_perror("KVM_IRQ_LINE failed");
}

void kvm__irq_trigger(struct kvm *kvm, int irq)
{
	kvm__irq_line(kvm, irq, 1);
	kvm__irq_line(kvm, irq, 0);
}

void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */
	if (!size)
		return;

	p = guest_flat_to_host(kvm, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(kvm, p + n))
			break;

		printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
				  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}

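/*
 * Pause/resume protocol: kvm__pause() takes pause_lock, sends SIGKVMPAUSE to
 * every VCPU thread and then waits on an eventfd until all of them have
 * acknowledged. A VCPU acknowledges by calling kvm__notify_paused(), which
 * bumps the eventfd count and then blocks on pause_lock until kvm__continue()
 * releases it.
 */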
void kvm__pause(void)
{
	int i, paused_vcpus = 0;

	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_lock(&pause_lock);

	pause_event = eventfd(0, 0);
	if (pause_event < 0)
		die("Failed creating pause notification event");
	for (i = 0; i < kvm->nrcpus; i++)
		pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE);

	while (paused_vcpus < kvm->nrcpus) {
		u64 cur_read;

		if (read(pause_event, &cur_read, sizeof(cur_read)) < 0)
			die("Failed reading pause event");
		paused_vcpus += cur_read;
	}
	close(pause_event);
}

void kvm__continue(void)
{
	/* Check if the guest is running */
	if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0)
		return;

	mutex_unlock(&pause_lock);
}

void kvm__notify_paused(void)
{
	u64 p = 1;

	if (write(pause_event, &p, sizeof(p)) < 0)
		die("Failed notifying of paused VCPU.");

	mutex_lock(&pause_lock);
	mutex_unlock(&pause_lock);
}
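
/*
 * Illustrative sketch only, not part of the original file: roughly how a VCPU
 * thread is expected to take part in the pause protocol above. The handler
 * name and the place where it is installed are assumptions for illustration;
 * in the KVM tool the real signal wiring lives in the VCPU code (kvm-cpu.c).
 */
#if 0
static void example_sigkvmpause_handler(int signum)
{
	if (signum == SIGKVMPAUSE)
		kvm__notify_paused();	/* blocks until kvm__continue() runs */
}
#endif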