#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/virtio.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

extern __thread struct kvm_cpu *current_kvm_cpu;

int __attribute__((weak)) kvm_cpu__get_endianness(struct kvm_cpu *vcpu)
{
	return VIRTIO_ENDIAN_HOST;
}

void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
	struct kvm_guest_debug debug = {
		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
		pr_warning("KVM_SET_GUEST_DEBUG failed");
}

void kvm_cpu__run(struct kvm_cpu *vcpu)
{
	int err;

	if (!vcpu->is_running)
		return;

	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
	if (err < 0 && (errno != EINTR && errno != EAGAIN))
		die_perror("KVM_RUN failed");
}

static void kvm_cpu_signal_handler(int signum)
{
	if (signum == SIGKVMEXIT) {
		if (current_kvm_cpu && current_kvm_cpu->is_running)
			current_kvm_cpu->is_running = false;
	} else if (signum == SIGKVMPAUSE) {
		current_kvm_cpu->paused = 1;
	}
}

static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
{
	if (cpu->ring) {
		while (cpu->ring->first != cpu->ring->last) {
			struct kvm_coalesced_mmio *m;
			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
			kvm_cpu__emulate_mmio(cpu,
					      m->phys_addr,
					      m->data,
					      m->len,
					      1);
			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
		}
	}
}

int kvm_cpu__start(struct kvm_cpu *cpu)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, SIGALRM);

	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	signal(SIGKVMEXIT, kvm_cpu_signal_handler);
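	/*
	 * Like SIGKVMEXIT above, SIGKVMPAUSE is delivered to the individual
	 * VCPU thread; kvm_cpu_signal_handler() sets ->paused and the run
	 * loop below acknowledges the pause via kvm__notify_paused().
	 */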
	signal(SIGKVMPAUSE, kvm_cpu_signal_handler);

	kvm_cpu__reset_vcpu(cpu);

	if (cpu->kvm->cfg.single_step)
		kvm_cpu__enable_singlestep(cpu);

	while (cpu->is_running) {
		if (cpu->paused) {
			kvm__notify_paused();
			cpu->paused = 0;
		}

		if (cpu->needs_nmi) {
			kvm_cpu__arch_nmi(cpu);
			cpu->needs_nmi = 0;
		}

		kvm_cpu__run(cpu);

		switch (cpu->kvm_run->exit_reason) {
		case KVM_EXIT_UNKNOWN:
			break;
		case KVM_EXIT_DEBUG:
			kvm_cpu__show_registers(cpu);
			kvm_cpu__show_code(cpu);
			break;
		case KVM_EXIT_IO: {
			bool ret;

			ret = kvm_cpu__emulate_io(cpu,
						  cpu->kvm_run->io.port,
						  (u8 *)cpu->kvm_run +
						  cpu->kvm_run->io.data_offset,
						  cpu->kvm_run->io.direction,
						  cpu->kvm_run->io.size,
						  cpu->kvm_run->io.count);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_MMIO: {
			bool ret;

			/*
			 * If we had an MMIO exit, the coalesced ring should be
			 * processed *before* processing the exit itself.
			 */
			kvm_cpu__handle_coalesced_mmio(cpu);

			ret = kvm_cpu__emulate_mmio(cpu,
						    cpu->kvm_run->mmio.phys_addr,
						    cpu->kvm_run->mmio.data,
						    cpu->kvm_run->mmio.len,
						    cpu->kvm_run->mmio.is_write);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_INTR:
			if (cpu->is_running)
				break;
			goto exit_kvm;
		case KVM_EXIT_SHUTDOWN:
			goto exit_kvm;
		case KVM_EXIT_SYSTEM_EVENT:
			/*
			 * Print the type of system event and
			 * treat all system events as a shutdown request.
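			 * Both KVM_SYSTEM_EVENT_RESET and KVM_SYSTEM_EVENT_SHUTDOWN
			 * currently end up tearing the guest down via kvm__reboot().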
			 */
			switch (cpu->kvm_run->system_event.type) {
			default:
				pr_warning("unknown system event type %d",
					   cpu->kvm_run->system_event.type);
				/* fall through for now */
			case KVM_SYSTEM_EVENT_RESET:
				/* fall through for now */
			case KVM_SYSTEM_EVENT_SHUTDOWN:
				/*
				 * Ensure that all VCPUs are torn down,
				 * regardless of which CPU generated the event.
				 */
				kvm__reboot(cpu->kvm);
				goto exit_kvm;
			}
			break;
		default: {
			bool ret;

			ret = kvm_cpu__handle_exit(cpu);
			if (!ret)
				goto panic_kvm;
			break;
		}
		}
		kvm_cpu__handle_coalesced_mmio(cpu);
	}

exit_kvm:
	return 0;

panic_kvm:
	return 1;
}

int kvm_cpu__init(struct kvm *kvm)
{
	int max_cpus, recommended_cpus, i;

	max_cpus = kvm__max_cpus(kvm);
	recommended_cpus = kvm__recommended_cpus(kvm);

	if (kvm->cfg.nrcpus > max_cpus) {
		printf(" # Limit the number of CPUs to %d\n", max_cpus);
		kvm->cfg.nrcpus = max_cpus;
	} else if (kvm->cfg.nrcpus > recommended_cpus) {
		printf(" # Warning: The maximum recommended amount of VCPUs"
		       " is %d\n", recommended_cpus);
	}

	kvm->nrcpus = kvm->cfg.nrcpus;

	/* Alloc one pointer too many, so array ends up 0-terminated */
	kvm->cpus = calloc(kvm->nrcpus + 1, sizeof(void *));
	if (!kvm->cpus) {
		pr_warning("Couldn't allocate array for %d CPUs", kvm->nrcpus);
		return -ENOMEM;
	}

	for (i = 0; i < kvm->nrcpus; i++) {
		kvm->cpus[i] = kvm_cpu__arch_init(kvm, i);
		if (!kvm->cpus[i]) {
			pr_warning("unable to initialize KVM VCPU");
			goto fail_alloc;
		}
	}

	return 0;

fail_alloc:
	for (i = 0; i < kvm->nrcpus; i++)
		free(kvm->cpus[i]);
	return -ENOMEM;
}
base_init(kvm_cpu__init);

int kvm_cpu__exit(struct kvm *kvm)
{
	int i, r = 0;	/* initialised so a single-VCPU teardown returns success */
	void *ret = NULL;

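	/*
	 * VCPU 0 is freed directly; every other VCPU that is still running
	 * is asked to stop with SIGKVMEXIT and its thread is joined before
	 * it is freed.
	 */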
	kvm_cpu__delete(kvm->cpus[0]);
	kvm->cpus[0] = NULL;

	for (i = 1; i < kvm->nrcpus; i++) {
		if (kvm->cpus[i]->is_running) {
			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
			if (pthread_join(kvm->cpus[i]->thread, &ret) != 0)
				die("pthread_join");
			kvm_cpu__delete(kvm->cpus[i]);
		}
		if (ret == NULL)
			r = 0;
	}

	free(kvm->cpus);

	kvm->nrcpus = 0;

	return r;
}
core_exit(kvm_cpu__exit);
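
/*
 * A minimal sketch (not part of this file) of how a per-VCPU thread is
 * typically expected to drive kvm_cpu__start() above. The thread function
 * name is illustrative; what matters is that current_kvm_cpu is set so the
 * signal handler can reach this VCPU, and that the caller records the
 * pthread id in cpu->thread so kvm_cpu__exit() can signal and join it:
 *
 *	static void *kvm_cpu_thread(void *arg)
 *	{
 *		struct kvm_cpu *cpu = arg;
 *
 *		current_kvm_cpu = cpu;
 *		if (kvm_cpu__start(cpu))
 *			die("KVM virtual CPU exited with an error");
 *		return NULL;
 *	}
 */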