1ae1fae34SPekka Enberg #include "kvm/kvm.h" 272811558SPekka Enberg #include "kvm/read-write.h" 372811558SPekka Enberg #include "kvm/util.h" 44298ddadSSasha Levin #include "kvm/mutex.h" 54298ddadSSasha Levin #include "kvm/kvm-cpu.h" 64b1addaeSSasha Levin #include "kvm/kvm-ipc.h" 7eda03319SPekka Enberg 86c7d8514SPekka Enberg #include <linux/kvm.h> 9f5ab5f67SPekka Enberg 104b1addaeSSasha Levin #include <sys/un.h> 11e2e876c2SMatt Evans #include <sys/stat.h> 124b1addaeSSasha Levin #include <sys/types.h> 134b1addaeSSasha Levin #include <sys/socket.h> 14ae1fae34SPekka Enberg #include <sys/ioctl.h> 151f9cff23SPekka Enberg #include <sys/mman.h> 162da26a59SPekka Enberg #include <stdbool.h> 1706e41eeaSPekka Enberg #include <limits.h> 18ce79f1caSPekka Enberg #include <signal.h> 19f5ab5f67SPekka Enberg #include <stdarg.h> 20b8f6afcdSPekka Enberg #include <stdlib.h> 21f5ab5f67SPekka Enberg #include <string.h> 220d1f17ecSPekka Enberg #include <unistd.h> 231f9cff23SPekka Enberg #include <stdio.h> 24b8f6afcdSPekka Enberg #include <fcntl.h> 25ce79f1caSPekka Enberg #include <time.h> 264298ddadSSasha Levin #include <sys/eventfd.h> 27c7828731SSasha Levin #include <asm/unistd.h> 2863bc8503SSasha Levin #include <dirent.h> 29b8f6afcdSPekka Enberg 30ae1fae34SPekka Enberg #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason 310d1f17ecSPekka Enberg 32ae1fae34SPekka Enberg const char *kvm_exit_reasons[] = { 33ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN), 34ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION), 35ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO), 36ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL), 37ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG), 38ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT), 39ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO), 40ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN), 41ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN), 42ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY), 43ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR), 44ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR), 45ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS), 46ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC), 47ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET), 48ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR), 49ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI), 50ae1fae34SPekka Enberg DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR), 5163e158a0SMatt Evans #ifdef CONFIG_PPC64 5263e158a0SMatt Evans DEFINE_KVM_EXIT_REASON(KVM_EXIT_PAPR_HCALL), 5363e158a0SMatt Evans #endif 549b1fb1c3SPekka Enberg }; 559b1fb1c3SPekka Enberg 564298ddadSSasha Levin extern struct kvm *kvm; 57e2077857SMatt Evans extern struct kvm_cpu **kvm_cpus; 584298ddadSSasha Levin static int pause_event; 594298ddadSSasha Levin static DEFINE_MUTEX(pause_lock); 60af7b0868SMatt Evans extern struct kvm_ext kvm_req_ext[]; 614298ddadSSasha Levin 629667701cSPekka Enberg static char kvm_dir[PATH_MAX]; 639667701cSPekka Enberg 649667701cSPekka Enberg static void set_dir(const char *fmt, va_list args) 659667701cSPekka Enberg { 66dd188f9fSPekka Enberg char tmp[PATH_MAX]; 67dd188f9fSPekka Enberg 68dd188f9fSPekka Enberg vsnprintf(tmp, sizeof(tmp), fmt, args); 69dd188f9fSPekka Enberg 702bc995fbSPekka Enberg mkdir(tmp, 0777); 712bc995fbSPekka Enberg 72dd188f9fSPekka Enberg if (!realpath(tmp, kvm_dir)) 73dd188f9fSPekka Enberg die("Unable to set KVM tool directory"); 74f76a3285SPekka Enberg 75f76a3285SPekka Enberg strcat(kvm_dir, "/"); 769667701cSPekka Enberg } 779667701cSPekka Enberg 789667701cSPekka Enberg void kvm__set_dir(const char *fmt, ...) 799667701cSPekka Enberg { 809667701cSPekka Enberg va_list args; 819667701cSPekka Enberg 829667701cSPekka Enberg va_start(args, fmt); 839667701cSPekka Enberg set_dir(fmt, args); 849667701cSPekka Enberg va_end(args); 859667701cSPekka Enberg } 869667701cSPekka Enberg 879667701cSPekka Enberg const char *kvm__get_dir(void) 889667701cSPekka Enberg { 899667701cSPekka Enberg return kvm_dir; 909667701cSPekka Enberg } 919667701cSPekka Enberg 921d6fb3f2SSasha Levin bool kvm__supports_extension(struct kvm *kvm, unsigned int extension) 93b8f6afcdSPekka Enberg { 9428fa19c0SPekka Enberg int ret; 95b8f6afcdSPekka Enberg 9643835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension); 974076b041SPekka Enberg if (ret < 0) 984076b041SPekka Enberg return false; 994076b041SPekka Enberg 1004076b041SPekka Enberg return ret; 1014076b041SPekka Enberg } 1024076b041SPekka Enberg 10343835ac9SSasha Levin static int kvm__check_extensions(struct kvm *kvm) 10455e19624SCyrill Gorcunov { 10555e19624SCyrill Gorcunov unsigned int i; 10655e19624SCyrill Gorcunov 107af7b0868SMatt Evans for (i = 0; ; i++) { 108af7b0868SMatt Evans if (!kvm_req_ext[i].name) 109af7b0868SMatt Evans break; 11043835ac9SSasha Levin if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) { 111599ed2a8SCyrill Gorcunov pr_err("Unsuppored KVM extension detected: %s", 11255e19624SCyrill Gorcunov kvm_req_ext[i].name); 11355e19624SCyrill Gorcunov return (int)-i; 11455e19624SCyrill Gorcunov } 11555e19624SCyrill Gorcunov } 11655e19624SCyrill Gorcunov 11755e19624SCyrill Gorcunov return 0; 11855e19624SCyrill Gorcunov } 11955e19624SCyrill Gorcunov 1204076b041SPekka Enberg static struct kvm *kvm__new(void) 1214076b041SPekka Enberg { 12243835ac9SSasha Levin struct kvm *kvm = calloc(1, sizeof *kvm); 1234076b041SPekka Enberg 12443835ac9SSasha Levin if (!kvm) 1254076b041SPekka Enberg die("out of memory"); 1264076b041SPekka Enberg 12743835ac9SSasha Levin return kvm; 1284076b041SPekka Enberg } 1294076b041SPekka Enberg 130b91f1976SLai Jiangshan #define KVM_SOCK_SUFFIX ".sock" 131b91f1976SLai Jiangshan #define KVM_SOCK_SUFFIX_LEN ((ssize_t)sizeof(KVM_SOCK_SUFFIX) - 1) 132b91f1976SLai Jiangshan 1334b1addaeSSasha Levin static int kvm__create_socket(struct kvm *kvm) 1345358b0e6SSasha Levin { 1354b1addaeSSasha Levin char full_name[PATH_MAX]; 1364b1addaeSSasha Levin unsigned int s; 1374b1addaeSSasha Levin struct sockaddr_un local; 1384b1addaeSSasha Levin int len, r; 1395358b0e6SSasha Levin 1405358b0e6SSasha Levin if (!kvm->name) 1414b1addaeSSasha Levin return -1; 1425358b0e6SSasha Levin 143b91f1976SLai Jiangshan sprintf(full_name, "%s/%s%s", kvm__get_dir(), kvm->name, 144b91f1976SLai Jiangshan KVM_SOCK_SUFFIX); 145fa0022d2SSasha Levin if (access(full_name, F_OK) == 0) 146fa0022d2SSasha Levin die("Socket file %s already exist", full_name); 147fa0022d2SSasha Levin 1484b1addaeSSasha Levin s = socket(AF_UNIX, SOCK_STREAM, 0); 1494b1addaeSSasha Levin if (s < 0) 1504b1addaeSSasha Levin return s; 1514b1addaeSSasha Levin local.sun_family = AF_UNIX; 1524b1addaeSSasha Levin strcpy(local.sun_path, full_name); 1534b1addaeSSasha Levin unlink(local.sun_path); 1544b1addaeSSasha Levin len = strlen(local.sun_path) + sizeof(local.sun_family); 1554b1addaeSSasha Levin r = bind(s, (struct sockaddr *)&local, len); 1564b1addaeSSasha Levin if (r < 0) 1574b1addaeSSasha Levin goto fail; 1584b1addaeSSasha Levin 1594b1addaeSSasha Levin r = listen(s, 5); 1604b1addaeSSasha Levin if (r < 0) 1614b1addaeSSasha Levin goto fail; 1624b1addaeSSasha Levin 1634b1addaeSSasha Levin return s; 1644b1addaeSSasha Levin 1654b1addaeSSasha Levin fail: 1664b1addaeSSasha Levin close(s); 1674b1addaeSSasha Levin return -1; 1685358b0e6SSasha Levin } 1695358b0e6SSasha Levin 1704b1addaeSSasha Levin void kvm__remove_socket(const char *name) 1715358b0e6SSasha Levin { 1725358b0e6SSasha Levin char full_name[PATH_MAX]; 1735358b0e6SSasha Levin 174b91f1976SLai Jiangshan sprintf(full_name, "%s/%s%s", kvm__get_dir(), name, KVM_SOCK_SUFFIX); 1755358b0e6SSasha Levin unlink(full_name); 1765358b0e6SSasha Levin } 1775358b0e6SSasha Levin 1784b1addaeSSasha Levin int kvm__get_sock_by_instance(const char *name) 1795358b0e6SSasha Levin { 1804b1addaeSSasha Levin int s, len, r; 1814b1addaeSSasha Levin char sock_file[PATH_MAX]; 1824b1addaeSSasha Levin struct sockaddr_un local; 1835358b0e6SSasha Levin 184b91f1976SLai Jiangshan sprintf(sock_file, "%s/%s%s", kvm__get_dir(), name, KVM_SOCK_SUFFIX); 1854b1addaeSSasha Levin s = socket(AF_UNIX, SOCK_STREAM, 0); 1865358b0e6SSasha Levin 1874b1addaeSSasha Levin local.sun_family = AF_UNIX; 1884b1addaeSSasha Levin strcpy(local.sun_path, sock_file); 1894b1addaeSSasha Levin len = strlen(local.sun_path) + sizeof(local.sun_family); 1905358b0e6SSasha Levin 1914b1addaeSSasha Levin r = connect(s, &local, len); 192e3e9e392SSasha Levin if (r < 0 && errno == ECONNREFUSED) { 193e3e9e392SSasha Levin /* Clean ghost socket file */ 194e3e9e392SSasha Levin unlink(sock_file); 195e3e9e392SSasha Levin return -1; 196e3e9e392SSasha Levin } else if (r < 0) { 1974b1addaeSSasha Levin die("Failed connecting to instance"); 198e3e9e392SSasha Levin } 1995358b0e6SSasha Levin 2004b1addaeSSasha Levin return s; 2015358b0e6SSasha Levin } 2025358b0e6SSasha Levin 2034b1addaeSSasha Levin int kvm__enumerate_instances(int (*callback)(const char *name, int fd)) 20463bc8503SSasha Levin { 2054b1addaeSSasha Levin int sock; 20663bc8503SSasha Levin DIR *dir; 20763bc8503SSasha Levin struct dirent entry, *result; 208886af5f2SLiming Wang int ret = 0; 20963bc8503SSasha Levin 210da08dcdbSLai Jiangshan dir = opendir(kvm__get_dir()); 2113d7f121cSLai Jiangshan if (!dir) 2123d7f121cSLai Jiangshan return -1; 21363bc8503SSasha Levin 2143d7f121cSLai Jiangshan for (;;) { 21563bc8503SSasha Levin readdir_r(dir, &entry, &result); 21663bc8503SSasha Levin if (result == NULL) 21763bc8503SSasha Levin break; 2184b1addaeSSasha Levin if (entry.d_type == DT_SOCK) { 219b91f1976SLai Jiangshan ssize_t name_len = strlen(entry.d_name); 220b91f1976SLai Jiangshan char *p; 221b91f1976SLai Jiangshan 222b91f1976SLai Jiangshan if (name_len <= KVM_SOCK_SUFFIX_LEN) 223b91f1976SLai Jiangshan continue; 224b91f1976SLai Jiangshan 225b91f1976SLai Jiangshan p = &entry.d_name[name_len - KVM_SOCK_SUFFIX_LEN]; 226b91f1976SLai Jiangshan if (memcmp(KVM_SOCK_SUFFIX, p, KVM_SOCK_SUFFIX_LEN)) 227b91f1976SLai Jiangshan continue; 228b91f1976SLai Jiangshan 229b91f1976SLai Jiangshan *p = 0; 2304b1addaeSSasha Levin sock = kvm__get_sock_by_instance(entry.d_name); 231e3e9e392SSasha Levin if (sock < 0) 232e3e9e392SSasha Levin continue; 2334b1addaeSSasha Levin ret = callback(entry.d_name, sock); 2344b1addaeSSasha Levin close(sock); 235886af5f2SLiming Wang if (ret < 0) 236886af5f2SLiming Wang break; 23763bc8503SSasha Levin } 23863bc8503SSasha Levin } 23963bc8503SSasha Levin 2401a0ef251SSasha Levin closedir(dir); 2411a0ef251SSasha Levin 242886af5f2SLiming Wang return ret; 24363bc8503SSasha Levin } 24463bc8503SSasha Levin 24543835ac9SSasha Levin void kvm__delete(struct kvm *kvm) 2469ef4c68eSPekka Enberg { 24743835ac9SSasha Levin kvm__stop_timer(kvm); 248fbfe68b7SSasha Levin 24943835ac9SSasha Levin munmap(kvm->ram_start, kvm->ram_size); 250c733c80bSSasha Levin kvm_ipc__stop(); 2514b1addaeSSasha Levin kvm__remove_socket(kvm->name); 25243835ac9SSasha Levin free(kvm); 2539ef4c68eSPekka Enberg } 2549ef4c68eSPekka Enberg 25596feb589SPekka Enberg /* 25696feb589SPekka Enberg * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping 25796feb589SPekka Enberg * memory regions to it. Therefore, be careful if you use this function for 25896feb589SPekka Enberg * registering memory regions for emulating hardware. 25996feb589SPekka Enberg */ 26096feb589SPekka Enberg void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr) 2614076b041SPekka Enberg { 2622b0e3342SPekka Enberg struct kvm_userspace_memory_region mem; 263839051d9SSasha Levin int ret; 264839051d9SSasha Levin 265839051d9SSasha Levin mem = (struct kvm_userspace_memory_region) { 26696feb589SPekka Enberg .slot = kvm->mem_slots++, 267874467f8SSasha Levin .guest_phys_addr = guest_phys, 268874467f8SSasha Levin .memory_size = size, 269c4acb611SIngo Molnar .userspace_addr = (unsigned long)userspace_addr, 270839051d9SSasha Levin }; 271839051d9SSasha Levin 272874467f8SSasha Levin ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem); 273839051d9SSasha Levin if (ret < 0) 274839051d9SSasha Levin die_perror("KVM_SET_USER_MEMORY_REGION ioctl"); 275839051d9SSasha Levin } 276839051d9SSasha Levin 2778259b8ccSSasha Levin int kvm__recommended_cpus(struct kvm *kvm) 278384922b3SPekka Enberg { 279384922b3SPekka Enberg int ret; 280384922b3SPekka Enberg 28143835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS); 2828259b8ccSSasha Levin if (ret <= 0) 2833b9b691dSMatt Evans /* 2843b9b691dSMatt Evans * api.txt states that if KVM_CAP_NR_VCPUS does not exist, 2853b9b691dSMatt Evans * assume 4. 2863b9b691dSMatt Evans */ 2873b9b691dSMatt Evans return 4; 288384922b3SPekka Enberg 289384922b3SPekka Enberg return ret; 290384922b3SPekka Enberg } 291384922b3SPekka Enberg 2924b1addaeSSasha Levin static void kvm__pid(int fd, u32 type, u32 len, u8 *msg) 2934b1addaeSSasha Levin { 2944b1addaeSSasha Levin pid_t pid = getpid(); 2954b1addaeSSasha Levin int r = 0; 2964b1addaeSSasha Levin 2974b1addaeSSasha Levin if (type == KVM_IPC_PID) 2984b1addaeSSasha Levin r = write(fd, &pid, sizeof(pid)); 2994b1addaeSSasha Levin 3004b1addaeSSasha Levin if (r < 0) 3014b1addaeSSasha Levin pr_warning("Failed sending PID"); 3024b1addaeSSasha Levin } 3034b1addaeSSasha Levin 3048259b8ccSSasha Levin /* 3058259b8ccSSasha Levin * The following hack should be removed once 'x86: Raise the hard 3068259b8ccSSasha Levin * VCPU count limit' makes it's way into the mainline. 3078259b8ccSSasha Levin */ 3088259b8ccSSasha Levin #ifndef KVM_CAP_MAX_VCPUS 3098259b8ccSSasha Levin #define KVM_CAP_MAX_VCPUS 66 3108259b8ccSSasha Levin #endif 3118259b8ccSSasha Levin 3128259b8ccSSasha Levin int kvm__max_cpus(struct kvm *kvm) 3138259b8ccSSasha Levin { 3148259b8ccSSasha Levin int ret; 3158259b8ccSSasha Levin 3168259b8ccSSasha Levin ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS); 3178259b8ccSSasha Levin if (ret <= 0) 3188259b8ccSSasha Levin ret = kvm__recommended_cpus(kvm); 3198259b8ccSSasha Levin 3208259b8ccSSasha Levin return ret; 3218259b8ccSSasha Levin } 3228259b8ccSSasha Levin 32361061257SMatt Evans struct kvm *kvm__init(const char *kvm_dev, const char *hugetlbfs_path, u64 ram_size, const char *name) 324839051d9SSasha Levin { 32543835ac9SSasha Levin struct kvm *kvm; 3264076b041SPekka Enberg int ret; 3274076b041SPekka Enberg 328af7b0868SMatt Evans if (!kvm__arch_cpu_supports_vm()) 329c78b8713SAsias He die("Your CPU does not support hardware virtualization"); 330c78b8713SAsias He 33143835ac9SSasha Levin kvm = kvm__new(); 3324076b041SPekka Enberg 33343835ac9SSasha Levin kvm->sys_fd = open(kvm_dev, O_RDWR); 33443835ac9SSasha Levin if (kvm->sys_fd < 0) { 3356d7c36ceSPekka Enberg if (errno == ENOENT) 336e907b83fSPekka Enberg die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev); 337f8334800SIngo Molnar if (errno == ENODEV) 338f8334800SIngo Molnar die("'%s' KVM driver not available.\n # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev); 3396d7c36ceSPekka Enberg 340f8334800SIngo Molnar fprintf(stderr, " Fatal, could not open %s: ", kvm_dev); 341f8334800SIngo Molnar perror(NULL); 342f8334800SIngo Molnar exit(1); 3436d7c36ceSPekka Enberg } 344b8f6afcdSPekka Enberg 34543835ac9SSasha Levin ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0); 3466c7d8514SPekka Enberg if (ret != KVM_API_VERSION) 347f5ab5f67SPekka Enberg die_perror("KVM_API_VERSION ioctl"); 3486c7d8514SPekka Enberg 34943835ac9SSasha Levin kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0); 35043835ac9SSasha Levin if (kvm->vm_fd < 0) 351f5ab5f67SPekka Enberg die_perror("KVM_CREATE_VM ioctl"); 35228fa19c0SPekka Enberg 35343835ac9SSasha Levin if (kvm__check_extensions(kvm)) 35455e19624SCyrill Gorcunov die("A required KVM extention is not supported by OS"); 3559687927dSAsias He 356*7eff9f49SWanlong Gao kvm__arch_init(kvm, hugetlbfs_path, ram_size); 3579687927dSAsias He 3585358b0e6SSasha Levin kvm->name = name; 3595358b0e6SSasha Levin 3604b1addaeSSasha Levin kvm_ipc__start(kvm__create_socket(kvm)); 3614b1addaeSSasha Levin kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid); 36243835ac9SSasha Levin return kvm; 3634076b041SPekka Enberg } 3644076b041SPekka Enberg 36572811558SPekka Enberg /* RFC 1952 */ 36672811558SPekka Enberg #define GZIP_ID1 0x1f 36772811558SPekka Enberg #define GZIP_ID2 0x8b 368663ce1dfSMatt Evans #define CPIO_MAGIC "0707" 369663ce1dfSMatt Evans /* initrd may be gzipped, or a plain cpio */ 37072811558SPekka Enberg static bool initrd_check(int fd) 37172811558SPekka Enberg { 372663ce1dfSMatt Evans unsigned char id[4]; 37372811558SPekka Enberg 37472811558SPekka Enberg if (read_in_full(fd, id, ARRAY_SIZE(id)) < 0) 37572811558SPekka Enberg return false; 37672811558SPekka Enberg 37772811558SPekka Enberg if (lseek(fd, 0, SEEK_SET) < 0) 37872811558SPekka Enberg die_perror("lseek"); 37972811558SPekka Enberg 380663ce1dfSMatt Evans return (id[0] == GZIP_ID1 && id[1] == GZIP_ID2) || 381663ce1dfSMatt Evans !memcmp(id, CPIO_MAGIC, 4); 38272811558SPekka Enberg } 38372811558SPekka Enberg 3846d1f350dSCyrill Gorcunov bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename, 38553861c74SJohn Floren const char *initrd_filename, const char *kernel_cmdline, u16 vidmode) 386ae1fae34SPekka Enberg { 3877fb218bdSPekka Enberg bool ret; 3882065a6f7SCyrill Gorcunov int fd_kernel = -1, fd_initrd = -1; 389ae1fae34SPekka Enberg 3902065a6f7SCyrill Gorcunov fd_kernel = open(kernel_filename, O_RDONLY); 3912065a6f7SCyrill Gorcunov if (fd_kernel < 0) 3920b62d2bbSPekka Enberg die("Unable to open kernel %s", kernel_filename); 393ae1fae34SPekka Enberg 3942065a6f7SCyrill Gorcunov if (initrd_filename) { 3952065a6f7SCyrill Gorcunov fd_initrd = open(initrd_filename, O_RDONLY); 3962065a6f7SCyrill Gorcunov if (fd_initrd < 0) 3970b62d2bbSPekka Enberg die("Unable to open initrd %s", initrd_filename); 39872811558SPekka Enberg 39972811558SPekka Enberg if (!initrd_check(fd_initrd)) 40072811558SPekka Enberg die("%s is not an initrd", initrd_filename); 4012065a6f7SCyrill Gorcunov } 4022065a6f7SCyrill Gorcunov 40353861c74SJohn Floren ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline, vidmode); 40428972750SCyrill Gorcunov 405009b0758SPekka Enberg if (ret) 406009b0758SPekka Enberg goto found_kernel; 407ae1fae34SPekka Enberg 4084542f276SCyrill Gorcunov pr_warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename); 4090b62d2bbSPekka Enberg 410604dbd63SMatt Evans ret = load_flat_binary(kvm, fd_kernel, fd_initrd, kernel_cmdline); 411604dbd63SMatt Evans 412009b0758SPekka Enberg if (ret) 413009b0758SPekka Enberg goto found_kernel; 414009b0758SPekka Enberg 415604dbd63SMatt Evans if (initrd_filename) 416604dbd63SMatt Evans close(fd_initrd); 4175a6ac675SSasha Levin close(fd_kernel); 4185a6ac675SSasha Levin 419009b0758SPekka Enberg die("%s is not a valid bzImage or flat binary", kernel_filename); 420009b0758SPekka Enberg 421009b0758SPekka Enberg found_kernel: 422604dbd63SMatt Evans if (initrd_filename) 423604dbd63SMatt Evans close(fd_initrd); 4245a6ac675SSasha Levin close(fd_kernel); 4255a6ac675SSasha Levin 426ae1fae34SPekka Enberg return ret; 427ae1fae34SPekka Enberg } 428ae1fae34SPekka Enberg 429ce79f1caSPekka Enberg #define TIMER_INTERVAL_NS 1000000 /* 1 msec */ 430ce79f1caSPekka Enberg 431ce79f1caSPekka Enberg /* 432ce79f1caSPekka Enberg * This function sets up a timer that's used to inject interrupts from the 433ce79f1caSPekka Enberg * userspace hypervisor into the guest at periodical intervals. Please note 434ce79f1caSPekka Enberg * that clock interrupt, for example, is not handled here. 435ce79f1caSPekka Enberg */ 43643835ac9SSasha Levin void kvm__start_timer(struct kvm *kvm) 437ce79f1caSPekka Enberg { 438ce79f1caSPekka Enberg struct itimerspec its; 439ce79f1caSPekka Enberg struct sigevent sev; 440ce79f1caSPekka Enberg 441ce79f1caSPekka Enberg memset(&sev, 0, sizeof(struct sigevent)); 442ce79f1caSPekka Enberg sev.sigev_value.sival_int = 0; 443c7828731SSasha Levin sev.sigev_notify = SIGEV_THREAD_ID; 444ce79f1caSPekka Enberg sev.sigev_signo = SIGALRM; 445c7828731SSasha Levin sev._sigev_un._tid = syscall(__NR_gettid); 446ce79f1caSPekka Enberg 44743835ac9SSasha Levin if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0) 448ce79f1caSPekka Enberg die("timer_create()"); 449ce79f1caSPekka Enberg 450ce79f1caSPekka Enberg its.it_value.tv_sec = TIMER_INTERVAL_NS / 1000000000; 451ce79f1caSPekka Enberg its.it_value.tv_nsec = TIMER_INTERVAL_NS % 1000000000; 452ce79f1caSPekka Enberg its.it_interval.tv_sec = its.it_value.tv_sec; 453ce79f1caSPekka Enberg its.it_interval.tv_nsec = its.it_value.tv_nsec; 454ce79f1caSPekka Enberg 45543835ac9SSasha Levin if (timer_settime(kvm->timerid, 0, &its, NULL) < 0) 456ce79f1caSPekka Enberg die("timer_settime()"); 457ce79f1caSPekka Enberg } 458ce79f1caSPekka Enberg 45943835ac9SSasha Levin void kvm__stop_timer(struct kvm *kvm) 460fbfe68b7SSasha Levin { 46143835ac9SSasha Levin if (kvm->timerid) 46243835ac9SSasha Levin if (timer_delete(kvm->timerid) < 0) 463fbfe68b7SSasha Levin die("timer_delete()"); 464fbfe68b7SSasha Levin 46543835ac9SSasha Levin kvm->timerid = 0; 466fbfe68b7SSasha Levin } 467fbfe68b7SSasha Levin 46843835ac9SSasha Levin void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size) 469090f898eSCyrill Gorcunov { 470090f898eSCyrill Gorcunov unsigned char *p; 471090f898eSCyrill Gorcunov unsigned long n; 472090f898eSCyrill Gorcunov 473090f898eSCyrill Gorcunov size &= ~7; /* mod 8 */ 474090f898eSCyrill Gorcunov if (!size) 475090f898eSCyrill Gorcunov return; 476090f898eSCyrill Gorcunov 47743835ac9SSasha Levin p = guest_flat_to_host(kvm, addr); 478090f898eSCyrill Gorcunov 47948cf3877SPekka Enberg for (n = 0; n < size; n += 8) { 48043835ac9SSasha Levin if (!host_ptr_in_ram(kvm, p + n)) 48148cf3877SPekka Enberg break; 48248cf3877SPekka Enberg 483090f898eSCyrill Gorcunov printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n", 484090f898eSCyrill Gorcunov addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3], 485090f898eSCyrill Gorcunov p[n + 4], p[n + 5], p[n + 6], p[n + 7]); 486090f898eSCyrill Gorcunov } 48748cf3877SPekka Enberg } 4884298ddadSSasha Levin 4894298ddadSSasha Levin void kvm__pause(void) 4904298ddadSSasha Levin { 4914298ddadSSasha Levin int i, paused_vcpus = 0; 4924298ddadSSasha Levin 4934298ddadSSasha Levin /* Check if the guest is running */ 4944298ddadSSasha Levin if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0) 4954298ddadSSasha Levin return; 4964298ddadSSasha Levin 4974298ddadSSasha Levin mutex_lock(&pause_lock); 4984298ddadSSasha Levin 4994298ddadSSasha Levin pause_event = eventfd(0, 0); 5004298ddadSSasha Levin if (pause_event < 0) 5014298ddadSSasha Levin die("Failed creating pause notification event"); 5024298ddadSSasha Levin for (i = 0; i < kvm->nrcpus; i++) 5034298ddadSSasha Levin pthread_kill(kvm_cpus[i]->thread, SIGKVMPAUSE); 5044298ddadSSasha Levin 5054298ddadSSasha Levin while (paused_vcpus < kvm->nrcpus) { 5064298ddadSSasha Levin u64 cur_read; 5074298ddadSSasha Levin 5084298ddadSSasha Levin if (read(pause_event, &cur_read, sizeof(cur_read)) < 0) 5094298ddadSSasha Levin die("Failed reading pause event"); 5104298ddadSSasha Levin paused_vcpus += cur_read; 5114298ddadSSasha Levin } 5124298ddadSSasha Levin close(pause_event); 5134298ddadSSasha Levin } 5144298ddadSSasha Levin 5154298ddadSSasha Levin void kvm__continue(void) 5164298ddadSSasha Levin { 5174298ddadSSasha Levin /* Check if the guest is running */ 5184298ddadSSasha Levin if (!kvm_cpus[0] || kvm_cpus[0]->thread == 0) 5194298ddadSSasha Levin return; 5204298ddadSSasha Levin 5214298ddadSSasha Levin mutex_unlock(&pause_lock); 5224298ddadSSasha Levin } 5234298ddadSSasha Levin 5244298ddadSSasha Levin void kvm__notify_paused(void) 5254298ddadSSasha Levin { 5264298ddadSSasha Levin u64 p = 1; 5274298ddadSSasha Levin 5284298ddadSSasha Levin if (write(pause_event, &p, sizeof(p)) < 0) 5294298ddadSSasha Levin die("Failed notifying of paused VCPU."); 5304298ddadSSasha Levin 5314298ddadSSasha Levin mutex_lock(&pause_lock); 5324298ddadSSasha Levin mutex_unlock(&pause_lock); 5334298ddadSSasha Levin } 534