xref: /kvmtool/kvm-cpu.c (revision 4346fd8f1ef04040775965c8476be61ad8e15643)
1 #include "kvm/kvm-cpu.h"
2 
3 #include "kvm/symbol.h"
4 #include "kvm/util.h"
5 #include "kvm/kvm.h"
6 
7 #include <sys/ioctl.h>
8 #include <sys/mman.h>
9 #include <signal.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <stdio.h>
14 
15 extern __thread struct kvm_cpu *current_kvm_cpu;
16 
17 void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
18 {
19 	struct kvm_guest_debug debug = {
20 		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
21 	};
22 
23 	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
24 		pr_warning("KVM_SET_GUEST_DEBUG failed");
25 }
26 
27 void kvm_cpu__run(struct kvm_cpu *vcpu)
28 {
29 	int err;
30 
31 	if (!vcpu->is_running)
32 		return;
33 
34 	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
35 	if (err < 0 && (errno != EINTR && errno != EAGAIN))
36 		die_perror("KVM_RUN failed");
37 }
38 
39 static void kvm_cpu_signal_handler(int signum)
40 {
41 	if (signum == SIGKVMEXIT) {
42 		if (current_kvm_cpu && current_kvm_cpu->is_running) {
43 			current_kvm_cpu->is_running = false;
44 			kvm__continue(current_kvm_cpu->kvm);
45 		}
46 	} else if (signum == SIGKVMPAUSE) {
47 		current_kvm_cpu->paused = 1;
48 	}
49 }
50 
51 static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
52 {
53 	if (cpu->ring) {
54 		while (cpu->ring->first != cpu->ring->last) {
55 			struct kvm_coalesced_mmio *m;
56 			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
57 			kvm_cpu__emulate_mmio(cpu->kvm,
58 					      m->phys_addr,
59 					      m->data,
60 					      m->len,
61 					      1);
62 			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
63 		}
64 	}
65 }
66 
67 void kvm_cpu__reboot(struct kvm *kvm)
68 {
69 	int i;
70 
71 	/* The kvm->cpus array contains a null pointer in the last location */
72 	for (i = 0; ; i++) {
73 		if (kvm->cpus[i])
74 			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
75 		else
76 			break;
77 	}
78 }
79 
/*
 * Main loop of a VCPU thread: set up per-thread signal handling, reset
 * the VCPU, then repeatedly enter guest mode and dispatch on the exit
 * reason until the VCPU is told to stop.
 *
 * Returns 0 on a clean shutdown (KVM_EXIT_SHUTDOWN, or an interrupt
 * with is_running cleared) and 1 when an exit could not be handled.
 */
int kvm_cpu__start(struct kvm_cpu *cpu)
{
	sigset_t sigset;

	/* Block SIGALRM in this thread; it is presumably reserved for
	 * another thread's timer — TODO confirm against the rest of the
	 * tool. */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIGALRM);

	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	/* Exit/pause requests arrive as signals so they can interrupt
	 * KVM_RUN (EINTR); see kvm_cpu_signal_handler. */
	signal(SIGKVMEXIT, kvm_cpu_signal_handler);
	signal(SIGKVMPAUSE, kvm_cpu_signal_handler);

	kvm_cpu__reset_vcpu(cpu);

	if (cpu->kvm->cfg.single_step)
		kvm_cpu__enable_singlestep(cpu);

	while (cpu->is_running) {
		/* Acknowledge a pause request before re-entering the guest. */
		if (cpu->paused) {
			kvm__notify_paused();
			cpu->paused = 0;
		}

		if (cpu->needs_nmi) {
			kvm_cpu__arch_nmi(cpu);
			cpu->needs_nmi = 0;
		}

		kvm_cpu__run(cpu);

		switch (cpu->kvm_run->exit_reason) {
		case KVM_EXIT_UNKNOWN:
			break;
		case KVM_EXIT_DEBUG:
			/* Single-step trap: dump state and keep going. */
			kvm_cpu__show_registers(cpu);
			kvm_cpu__show_code(cpu);
			break;
		case KVM_EXIT_IO: {
			bool ret;

			/* Port I/O data lives inside the kvm_run mmap at
			 * io.data_offset. */
			ret = kvm_cpu__emulate_io(cpu->kvm,
						  cpu->kvm_run->io.port,
						  (u8 *)cpu->kvm_run +
						  cpu->kvm_run->io.data_offset,
						  cpu->kvm_run->io.direction,
						  cpu->kvm_run->io.size,
						  cpu->kvm_run->io.count);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_MMIO: {
			bool ret;

			/*
			 * If we had MMIO exit, coalesced ring should be processed
			 * *before* processing the exit itself
			 */
			kvm_cpu__handle_coalesced_mmio(cpu);

			ret = kvm_cpu__emulate_mmio(cpu->kvm,
						    cpu->kvm_run->mmio.phys_addr,
						    cpu->kvm_run->mmio.data,
						    cpu->kvm_run->mmio.len,
						    cpu->kvm_run->mmio.is_write);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_INTR:
			/* Interrupted by a signal: only exit if the handler
			 * cleared is_running (SIGKVMEXIT). */
			if (cpu->is_running)
				break;
			goto exit_kvm;
		case KVM_EXIT_SHUTDOWN:
			goto exit_kvm;
		default: {
			bool ret;

			/* Architecture-specific exit reasons. */
			ret = kvm_cpu__handle_exit(cpu);
			if (!ret)
				goto panic_kvm;
			break;
		}
		}
		/* Drain any MMIO coalesced while handling this exit. */
		kvm_cpu__handle_coalesced_mmio(cpu);
	}

exit_kvm:
	return 0;

panic_kvm:
	return 1;
}
175 
176 int kvm_cpu__init(struct kvm *kvm)
177 {
178 	int max_cpus, recommended_cpus, i;
179 
180 	max_cpus = kvm__max_cpus(kvm);
181 	recommended_cpus = kvm__recommended_cpus(kvm);
182 
183 	if (kvm->cfg.nrcpus > max_cpus) {
184 		printf("  # Limit the number of CPUs to %d\n", max_cpus);
185 		kvm->cfg.nrcpus = max_cpus;
186 	} else if (kvm->cfg.nrcpus > recommended_cpus) {
187 		printf("  # Warning: The maximum recommended amount of VCPUs"
188 			" is %d\n", recommended_cpus);
189 	}
190 
191 	kvm->nrcpus = kvm->cfg.nrcpus;
192 
193 	/* Alloc one pointer too many, so array ends up 0-terminated */
194 	kvm->cpus = calloc(kvm->nrcpus + 1, sizeof(void *));
195 	if (!kvm->cpus) {
196 		pr_warning("Couldn't allocate array for %d CPUs", kvm->nrcpus);
197 		return -ENOMEM;
198 	}
199 
200 	for (i = 0; i < kvm->nrcpus; i++) {
201 		kvm->cpus[i] = kvm_cpu__arch_init(kvm, i);
202 		if (!kvm->cpus[i]) {
203 			pr_warning("unable to initialize KVM VCPU");
204 			goto fail_alloc;
205 		}
206 	}
207 
208 	return 0;
209 
210 fail_alloc:
211 	for (i = 0; i < kvm->nrcpus; i++)
212 		free(kvm->cpus[i]);
213 	return -ENOMEM;
214 }
215 base_init(kvm_cpu__init);
216 
217 int kvm_cpu__exit(struct kvm *kvm)
218 {
219 	int i, r;
220 	void *ret = NULL;
221 
222 	kvm_cpu__delete(kvm->cpus[0]);
223 	kvm->cpus[0] = NULL;
224 
225 	for (i = 1; i < kvm->nrcpus; i++) {
226 		if (kvm->cpus[i]->is_running) {
227 			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
228 			if (pthread_join(kvm->cpus[i]->thread, &ret) != 0)
229 				die("pthread_join");
230 			kvm_cpu__delete(kvm->cpus[i]);
231 		}
232 		if (ret == NULL)
233 			r = 0;
234 	}
235 
236 	free(kvm->cpus);
237 
238 	kvm->nrcpus = 0;
239 
240 	return r;
241 }
242 late_exit(kvm_cpu__exit);
243