/* /kvmtool/kvm-cpu.c (revision 72e13944777a6c60fbcd78ef97e06ffd00969d77) */
#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/virtio.h"
#include "kvm/mutex.h"
#include "kvm/barrier.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/eventfd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

extern __thread struct kvm_cpu *current_kvm_cpu;

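/*
 * Weak default for architectures without configurable guest endianness;
 * arch code may override this to report the vcpu's current byte order.
 */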
int __attribute__((weak)) kvm_cpu__get_endianness(struct kvm_cpu *vcpu)
{
	return VIRTIO_ENDIAN_HOST;
}

void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
	struct kvm_guest_debug debug = {
		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
		pr_warning("KVM_SET_GUEST_DEBUG failed");
}

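/*
 * Enter the guest once via KVM_RUN. EINTR/EAGAIN are expected when the
 * thread is kicked by one of the SIGKVM* signals below and simply return
 * control to the caller's loop; any other error is fatal.
 */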
void kvm_cpu__run(struct kvm_cpu *vcpu)
{
	int err;

	if (!vcpu->is_running)
		return;

	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
	if (err < 0 && (errno != EINTR && errno != EAGAIN))
		die_perror("KVM_RUN failed");
}

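/*
 * VCPU threads spend most of their time blocked in KVM_RUN, so state
 * changes are requested by signalling the thread: SIGKVMEXIT asks it to
 * stop, SIGKVMPAUSE parks it until kvm__continue() and SIGKVMTASK makes
 * it run a queued task. Delivering the signal forces KVM_RUN to return
 * with EINTR so the main loop can act on the new state.
 */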
static void kvm_cpu_signal_handler(int signum)
{
	if (signum == SIGKVMEXIT) {
		if (current_kvm_cpu && current_kvm_cpu->is_running)
			current_kvm_cpu->is_running = false;
	} else if (signum == SIGKVMPAUSE) {
		if (current_kvm_cpu->paused)
			die("Pause signaled for already paused CPU\n");

		/* pause_lock is held by kvm__pause() */
		current_kvm_cpu->paused = 1;

		/*
		 * This is a blocking function and uses locks. It is safe
		 * to call it for this signal as a second pause event should
		 * not be sent to this thread until it acquires and releases
		 * the pause_lock.
		 */
		kvm__notify_paused();
	}

	/* For SIGKVMTASK cpu->task is already set */
}

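/*
 * Drain the coalesced MMIO ring shared with KVM: writes to registered
 * coalescing zones are queued in the ring instead of causing an
 * immediate exit, so replay them here as ordinary write (is_write = 1)
 * MMIO accesses.
 */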
static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
{
	if (cpu->ring) {
		while (cpu->ring->first != cpu->ring->last) {
			struct kvm_coalesced_mmio *m;
			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
			kvm_cpu__emulate_mmio(cpu,
					      m->phys_addr,
					      m->data,
					      m->len,
					      1);
			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
		}
	}
}

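/*
 * task_lock serializes callers of kvm_cpu__run_on_all_cpus();
 * task_eventfd counts task completions, one increment per vcpu
 * (eventfd reads return the accumulated count and reset it).
 */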
static DEFINE_MUTEX(task_lock);
static int task_eventfd;

static void kvm_cpu__run_task(struct kvm_cpu *cpu)
{
	u64 inc = 1;

	pr_debug("Running task %p on cpu %lu", cpu->task, cpu->cpu_id);

	/* Make sure we see the store to cpu->task */
	rmb();
	cpu->task->func(cpu, cpu->task->data);

	/* Clear task before we signal completion */
	cpu->task = NULL;
	wmb();

	if (write(task_eventfd, &inc, sizeof(inc)) < 0)
		die("Failed notifying of completed task.");
}

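/*
 * Post @task to every vcpu and wait until all of them have run it. The
 * calling thread may itself be a vcpu thread, in which case it executes
 * the task directly instead of signalling itself.
 */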
void kvm_cpu__run_on_all_cpus(struct kvm *kvm, struct kvm_cpu_task *task)
{
	int i, done = 0;

	pr_debug("Running task %p on all cpus", task);

	mutex_lock(&task_lock);

	for (i = 0; i < kvm->nrcpus; i++) {
		if (kvm->cpus[i]->task) {
			/* Should never happen */
			die("CPU %d already has a task pending!", i);
		}

		kvm->cpus[i]->task = task;
		wmb();

		if (kvm->cpus[i] == current_kvm_cpu)
			kvm_cpu__run_task(current_kvm_cpu);
		else
			pthread_kill(kvm->cpus[i]->thread, SIGKVMTASK);
	}

	while (done < kvm->nrcpus) {
		u64 count;

		if (read(task_eventfd, &count, sizeof(count)) < 0)
			die("Failed reading task eventfd");

		done += count;
	}

	mutex_unlock(&task_lock);
}

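/*
 * Main loop of a vcpu thread: re-enter the guest until is_running is
 * cleared by SIGKVMEXIT, handling pending NMIs, queued tasks and KVM
 * exit reasons in between.
 */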
int kvm_cpu__start(struct kvm_cpu *cpu)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, SIGALRM);

	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	signal(SIGKVMEXIT, kvm_cpu_signal_handler);
	signal(SIGKVMPAUSE, kvm_cpu_signal_handler);
	signal(SIGKVMTASK, kvm_cpu_signal_handler);

	kvm_cpu__reset_vcpu(cpu);

	if (cpu->kvm->cfg.single_step)
		kvm_cpu__enable_singlestep(cpu);

	while (cpu->is_running) {
		if (cpu->needs_nmi) {
			kvm_cpu__arch_nmi(cpu);
			cpu->needs_nmi = 0;
		}

		if (cpu->task)
			kvm_cpu__run_task(cpu);

		kvm_cpu__run(cpu);

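		/* Dispatch on the exit reason KVM recorded in the shared kvm_run page. */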
		switch (cpu->kvm_run->exit_reason) {
		case KVM_EXIT_UNKNOWN:
			break;
		case KVM_EXIT_DEBUG:
			kvm_cpu__show_registers(cpu);
			kvm_cpu__show_code(cpu);
			break;
		case KVM_EXIT_IO: {
			bool ret;

			ret = kvm_cpu__emulate_io(cpu,
						  cpu->kvm_run->io.port,
						  (u8 *)cpu->kvm_run +
						  cpu->kvm_run->io.data_offset,
						  cpu->kvm_run->io.direction,
						  cpu->kvm_run->io.size,
						  cpu->kvm_run->io.count);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_MMIO: {
			bool ret;

			/*
			 * If we had an MMIO exit, the coalesced ring should be
			 * processed *before* processing the exit itself.
			 */
			kvm_cpu__handle_coalesced_mmio(cpu);

			ret = kvm_cpu__emulate_mmio(cpu,
						    cpu->kvm_run->mmio.phys_addr,
						    cpu->kvm_run->mmio.data,
						    cpu->kvm_run->mmio.len,
						    cpu->kvm_run->mmio.is_write);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_INTR:
			if (cpu->is_running)
				break;
			goto exit_kvm;
		case KVM_EXIT_SHUTDOWN:
			goto exit_kvm;
		case KVM_EXIT_SYSTEM_EVENT:
			/*
			 * Print the type of system event and
			 * treat all system events as shutdown requests.
			 */
			switch (cpu->kvm_run->system_event.type) {
			default:
				pr_warning("unknown system event type %d",
					   cpu->kvm_run->system_event.type);
				/* Fall through for now */
			case KVM_SYSTEM_EVENT_RESET:
				/* Fall through for now */
			case KVM_SYSTEM_EVENT_SHUTDOWN:
				/*
				 * Ensure that all VCPUs are torn down,
				 * regardless of which CPU generated the event.
				 */
				kvm__reboot(cpu->kvm);
				goto exit_kvm;
			}
			break;
		default: {
			bool ret;

			ret = kvm_cpu__handle_exit(cpu);
			if (!ret)
				goto panic_kvm;
			break;
		}
		}
		kvm_cpu__handle_coalesced_mmio(cpu);
	}

exit_kvm:
	return 0;

panic_kvm:
	return 1;
}

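/*
 * Clamp the requested vcpu count against KVM's limits, then create and
 * arch-initialize one struct kvm_cpu per vcpu. Registered via
 * base_init() so this runs early in kvmtool's init sequence, before
 * subsystems that need kvm->cpus.
 */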
int kvm_cpu__init(struct kvm *kvm)
{
	int max_cpus, recommended_cpus, i;

	max_cpus = kvm__max_cpus(kvm);
	recommended_cpus = kvm__recommended_cpus(kvm);

	if (kvm->cfg.nrcpus > max_cpus) {
		pr_warning("Limiting the number of CPUs to %d", max_cpus);
		kvm->cfg.nrcpus = max_cpus;
	} else if (kvm->cfg.nrcpus > recommended_cpus) {
		pr_warning("The maximum recommended number of VCPUs is %d",
			   recommended_cpus);
	}

	kvm->nrcpus = kvm->cfg.nrcpus;

	task_eventfd = eventfd(0, 0);
	if (task_eventfd < 0) {
		pr_err("Couldn't create task_eventfd");
		return task_eventfd;
	}

	/* Alloc one pointer too many, so the array ends up NULL-terminated */
	kvm->cpus = calloc(kvm->nrcpus + 1, sizeof(void *));
	if (!kvm->cpus) {
		pr_err("Couldn't allocate array for %d CPUs", kvm->nrcpus);
		return -ENOMEM;
	}

	for (i = 0; i < kvm->nrcpus; i++) {
		kvm->cpus[i] = kvm_cpu__arch_init(kvm, i);
		if (!kvm->cpus[i]) {
			pr_err("unable to initialize KVM VCPU");
			goto fail_alloc;
		}
	}

	return 0;

fail_alloc:
	for (i = 0; i < kvm->nrcpus; i++)
		free(kvm->cpus[i]);
	return -ENOMEM;
}
base_init(kvm_cpu__init);

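/*
 * Tear down all vcpus: vcpu 0 belongs to the calling thread, so it is
 * deleted directly; the remaining threads are told to exit via
 * SIGKVMEXIT and joined before their state is freed.
 */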
int kvm_cpu__exit(struct kvm *kvm)
{
	int i, r = 0;
	void *ret = NULL;

	kvm_cpu__delete(kvm->cpus[0]);
	kvm->cpus[0] = NULL;

	kvm__pause(kvm);
	for (i = 1; i < kvm->nrcpus; i++) {
		if (kvm->cpus[i]->is_running) {
			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
			if (pthread_join(kvm->cpus[i]->thread, &ret) != 0)
				die("pthread_join");
			kvm_cpu__delete(kvm->cpus[i]);
		}
		if (ret == NULL)
			r = 0;
	}
	kvm__continue(kvm);

	free(kvm->cpus);

	kvm->nrcpus = 0;

	close(task_eventfd);

	return r;
}