xref: /kvmtool/kvm-cpu.c (revision e2077857c04d460d4d0ac17cf0e246d43bd26100)
15c3d55faSPekka Enberg #include "kvm/kvm-cpu.h"
25c3d55faSPekka Enberg 
3b0b42ba0SPekka Enberg #include "kvm/symbol.h"
45c3d55faSPekka Enberg #include "kvm/util.h"
55c3d55faSPekka Enberg #include "kvm/kvm.h"
65c3d55faSPekka Enberg 
75c3d55faSPekka Enberg #include <sys/ioctl.h>
85c3d55faSPekka Enberg #include <sys/mman.h>
95ee154d1SPekka Enberg #include <signal.h>
105c3d55faSPekka Enberg #include <stdlib.h>
11b0b42ba0SPekka Enberg #include <string.h>
125c3d55faSPekka Enberg #include <errno.h>
135c3d55faSPekka Enberg #include <stdio.h>
145c3d55faSPekka Enberg 
15*e2077857SMatt Evans extern struct kvm_cpu **kvm_cpus;
16656be1b8SSasha Levin extern __thread struct kvm_cpu *current_kvm_cpu;
17656be1b8SSasha Levin 
1843835ac9SSasha Levin void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
195c3d55faSPekka Enberg {
205c3d55faSPekka Enberg 	struct kvm_guest_debug debug = {
215c3d55faSPekka Enberg 		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
225c3d55faSPekka Enberg 	};
235c3d55faSPekka Enberg 
2443835ac9SSasha Levin 	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
254542f276SCyrill Gorcunov 		pr_warning("KVM_SET_GUEST_DEBUG failed");
265c3d55faSPekka Enberg }
275c3d55faSPekka Enberg 
2843835ac9SSasha Levin void kvm_cpu__run(struct kvm_cpu *vcpu)
295c3d55faSPekka Enberg {
305c3d55faSPekka Enberg 	int err;
315c3d55faSPekka Enberg 
3243835ac9SSasha Levin 	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
3376b75d32SMatt Evans 	if (err < 0 && (errno != EINTR && errno != EAGAIN))
345c3d55faSPekka Enberg 		die_perror("KVM_RUN failed");
355c3d55faSPekka Enberg }
3665bab644SPekka Enberg 
374298ddadSSasha Levin static void kvm_cpu_signal_handler(int signum)
3849e5227dSSasha Levin {
394298ddadSSasha Levin 	if (signum == SIGKVMEXIT) {
40714e5b7fSSasha Levin 		if (current_kvm_cpu && current_kvm_cpu->is_running) {
41656be1b8SSasha Levin 			current_kvm_cpu->is_running = false;
42656be1b8SSasha Levin 			pthread_kill(pthread_self(), SIGKVMEXIT);
43656be1b8SSasha Levin 		}
444298ddadSSasha Levin 	} else if (signum == SIGKVMPAUSE) {
454298ddadSSasha Levin 		current_kvm_cpu->paused = 1;
464298ddadSSasha Levin 	}
4749e5227dSSasha Levin }
4849e5227dSSasha Levin 
4973f7e5b3SSasha Levin static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
5073f7e5b3SSasha Levin {
5173f7e5b3SSasha Levin 	if (cpu->ring) {
5273f7e5b3SSasha Levin 		while (cpu->ring->first != cpu->ring->last) {
5373f7e5b3SSasha Levin 			struct kvm_coalesced_mmio *m;
5473f7e5b3SSasha Levin 			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
5517edd536SMatt Evans 			kvm_cpu__emulate_mmio(cpu->kvm,
5673f7e5b3SSasha Levin 					      m->phys_addr,
5773f7e5b3SSasha Levin 					      m->data,
5873f7e5b3SSasha Levin 					      m->len,
5973f7e5b3SSasha Levin 					      1);
6073f7e5b3SSasha Levin 			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
6173f7e5b3SSasha Levin 		}
6273f7e5b3SSasha Levin 	}
6373f7e5b3SSasha Levin }
6473f7e5b3SSasha Levin 
65714e5b7fSSasha Levin void kvm_cpu__reboot(void)
66714e5b7fSSasha Levin {
67c23d9748SSasha Levin 	int i;
68c23d9748SSasha Levin 
69*e2077857SMatt Evans 	/* The kvm_cpus array contains a null pointer in the last location */
70*e2077857SMatt Evans 	for (i = 0; ; i++) {
71c23d9748SSasha Levin 		if (kvm_cpus[i])
72c23d9748SSasha Levin 			pthread_kill(kvm_cpus[i]->thread, SIGKVMEXIT);
73*e2077857SMatt Evans 		else
74*e2077857SMatt Evans 			break;
75*e2077857SMatt Evans 	}
76714e5b7fSSasha Levin }
77714e5b7fSSasha Levin 
/*
 * Per-vcpu thread entry: set up signal handling, reset the vcpu, then loop
 * on KVM_RUN dispatching each exit reason until the vcpu stops running.
 * Returns 0 on a clean exit, 1 when an exit could not be handled.
 */
int kvm_cpu__start(struct kvm_cpu *cpu)
{
	sigset_t sigset;

	/* Keep SIGALRM blocked on vcpu threads; it is handled elsewhere. */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIGALRM);

	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	/* Exit and pause requests arrive as signals; see kvm_cpu_signal_handler. */
	signal(SIGKVMEXIT, kvm_cpu_signal_handler);
	signal(SIGKVMPAUSE, kvm_cpu_signal_handler);

	kvm_cpu__reset_vcpu(cpu);

	if (cpu->kvm->single_step)
		kvm_cpu__enable_singlestep(cpu);

	while (cpu->is_running) {
		/* Acknowledge a pause request before re-entering the guest. */
		if (cpu->paused) {
			kvm__notify_paused();
			cpu->paused = 0;
		}

		if (cpu->needs_nmi) {
			kvm_cpu__arch_nmi(cpu);
			cpu->needs_nmi = 0;
		}

		kvm_cpu__run(cpu);

		switch (cpu->kvm_run->exit_reason) {
		case KVM_EXIT_UNKNOWN:
			break;
		case KVM_EXIT_DEBUG:
			/* Single-step trap: dump state and keep going. */
			kvm_cpu__show_registers(cpu);
			kvm_cpu__show_code(cpu);
			break;
		case KVM_EXIT_IO: {
			bool ret;

			/* Data lives inside the kvm_run mapping at io.data_offset. */
			ret = kvm_cpu__emulate_io(cpu->kvm,
						  cpu->kvm_run->io.port,
						  (u8 *)cpu->kvm_run +
						  cpu->kvm_run->io.data_offset,
						  cpu->kvm_run->io.direction,
						  cpu->kvm_run->io.size,
						  cpu->kvm_run->io.count);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_MMIO: {
			bool ret;

			/*
			 * If we had MMIO exit, coalesced ring should be processed
			 * *before* processing the exit itself
			 */
			kvm_cpu__handle_coalesced_mmio(cpu);

			ret = kvm_cpu__emulate_mmio(cpu->kvm,
						    cpu->kvm_run->mmio.phys_addr,
						    cpu->kvm_run->mmio.data,
						    cpu->kvm_run->mmio.len,
						    cpu->kvm_run->mmio.is_write);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_INTR:
			/*
			 * Interrupted by a signal: only leave the loop if the
			 * handler cleared is_running (i.e. SIGKVMEXIT).
			 */
			if (cpu->is_running)
				break;
			goto exit_kvm;
		case KVM_EXIT_SHUTDOWN:
			goto exit_kvm;
		default: {
			bool ret;

			/* Architecture-specific exit reasons. */
			ret = kvm_cpu__handle_exit(cpu);
			if (!ret)
				goto panic_kvm;
			break;
		}
		}
		/* Drain any MMIO writes KVM coalesced since the last exit. */
		kvm_cpu__handle_coalesced_mmio(cpu);
	}

exit_kvm:
	return 0;

panic_kvm:
	return 1;
}
173