xref: /kvmtool/kvm-cpu.c (revision 0161ed77586b53f080f1fa4c3d95284dcd092b84)
#include "kvm/kvm-cpu.h"

#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/virtio.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

extern __thread struct kvm_cpu *current_kvm_cpu;

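/*
 * Weak default used when an architecture does not override guest
 * endianness detection: assume the guest uses the host's byte order.
 */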
int __attribute__((weak)) kvm_cpu__get_endianness(struct kvm_cpu *vcpu)
{
	return VIRTIO_ENDIAN_HOST;
}

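/* Enable single-step debugging so the guest traps back to us (KVM_EXIT_DEBUG) after every instruction. */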
void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
	struct kvm_guest_debug debug = {
		.control	= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
		pr_warning("KVM_SET_GUEST_DEBUG failed");
}

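/*
 * Enter guest mode once. EINTR/EAGAIN just mean a signal interrupted
 * KVM_RUN and the caller should retry; any other error is fatal.
 */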
void kvm_cpu__run(struct kvm_cpu *vcpu)
{
	int err;

	if (!vcpu->is_running)
		return;

	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
	if (err < 0 && (errno != EINTR && errno != EAGAIN))
		die_perror("KVM_RUN failed");
}

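/*
 * Per-VCPU-thread signal handler: SIGKVMEXIT stops this VCPU's run loop,
 * SIGKVMPAUSE asks it to pause and acknowledge via kvm__notify_paused().
 */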
static void kvm_cpu_signal_handler(int signum)
{
	if (signum == SIGKVMEXIT) {
		if (current_kvm_cpu && current_kvm_cpu->is_running) {
			current_kvm_cpu->is_running = false;
			kvm__continue(current_kvm_cpu->kvm);
		}
	} else if (signum == SIGKVMPAUSE) {
		current_kvm_cpu->paused = 1;
	}
}

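/* Drain MMIO writes that KVM batched up in the coalesced MMIO ring. */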
static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
{
	if (cpu->ring) {
		while (cpu->ring->first != cpu->ring->last) {
			struct kvm_coalesced_mmio *m;
			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
			kvm_cpu__emulate_mmio(cpu,
					      m->phys_addr,
					      m->data,
					      m->len,
					      1);
			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
		}
	}
}

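/* Ask every VCPU thread to leave its run loop by sending it SIGKVMEXIT. */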
void kvm_cpu__reboot(struct kvm *kvm)
{
	int i;

	/* The kvm->cpus array contains a null pointer in the last location */
	for (i = 0; ; i++) {
		if (kvm->cpus[i])
			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
		else
			break;
	}
}

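/*
 * Main body of a VCPU thread: reset the VCPU, then loop on KVM_RUN and
 * dispatch each exit reason until the VCPU is told to stop or the guest
 * shuts down.
 */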
int kvm_cpu__start(struct kvm_cpu *cpu)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, SIGALRM);

	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	signal(SIGKVMEXIT, kvm_cpu_signal_handler);
	signal(SIGKVMPAUSE, kvm_cpu_signal_handler);

	kvm_cpu__reset_vcpu(cpu);

	if (cpu->kvm->cfg.single_step)
		kvm_cpu__enable_singlestep(cpu);

	while (cpu->is_running) {
		if (cpu->paused) {
			kvm__notify_paused();
			cpu->paused = 0;
		}

		if (cpu->needs_nmi) {
			kvm_cpu__arch_nmi(cpu);
			cpu->needs_nmi = 0;
		}

		kvm_cpu__run(cpu);

		switch (cpu->kvm_run->exit_reason) {
		case KVM_EXIT_UNKNOWN:
			break;
		case KVM_EXIT_DEBUG:
			kvm_cpu__show_registers(cpu);
			kvm_cpu__show_code(cpu);
			break;
		case KVM_EXIT_IO: {
			bool ret;

			ret = kvm_cpu__emulate_io(cpu,
						  cpu->kvm_run->io.port,
						  (u8 *)cpu->kvm_run +
						  cpu->kvm_run->io.data_offset,
						  cpu->kvm_run->io.direction,
						  cpu->kvm_run->io.size,
						  cpu->kvm_run->io.count);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_MMIO: {
			bool ret;

			/*
			 * On an MMIO exit, the coalesced MMIO ring must be
			 * drained *before* the exit itself is handled.
			 */
			kvm_cpu__handle_coalesced_mmio(cpu);

			ret = kvm_cpu__emulate_mmio(cpu,
						    cpu->kvm_run->mmio.phys_addr,
						    cpu->kvm_run->mmio.data,
						    cpu->kvm_run->mmio.len,
						    cpu->kvm_run->mmio.is_write);

			if (!ret)
				goto panic_kvm;
			break;
		}
		case KVM_EXIT_INTR:
			if (cpu->is_running)
				break;
			goto exit_kvm;
		case KVM_EXIT_SHUTDOWN:
			goto exit_kvm;
		case KVM_EXIT_SYSTEM_EVENT:
			/*
			 * Report unknown system event types and treat every
			 * system event as a shutdown request.
			 */
			switch (cpu->kvm_run->system_event.type) {
			default:
				pr_warning("unknown system event type %d",
					   cpu->kvm_run->system_event.type);
				/* fall through for now */
			case KVM_SYSTEM_EVENT_RESET:
				/* Fall through for now */
			case KVM_SYSTEM_EVENT_SHUTDOWN:
				/*
				 * Ensure that all VCPUs are torn down,
				 * regardless of which CPU generated the event.
				 */
				kvm_cpu__reboot(cpu->kvm);
				goto exit_kvm;
			}
			break;
		default: {
			bool ret;

			ret = kvm_cpu__handle_exit(cpu);
			if (!ret)
				goto panic_kvm;
			break;
		}
		}
		kvm_cpu__handle_coalesced_mmio(cpu);
	}

exit_kvm:
	return 0;

panic_kvm:
	return 1;
}

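/*
 * Allocate and initialize a struct kvm_cpu for every requested VCPU,
 * clamping the count to what the host's KVM module supports.
 */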
int kvm_cpu__init(struct kvm *kvm)
{
	int max_cpus, recommended_cpus, i;

	max_cpus = kvm__max_cpus(kvm);
	recommended_cpus = kvm__recommended_cpus(kvm);

	if (kvm->cfg.nrcpus > max_cpus) {
		printf("  # Limit the number of CPUs to %d\n", max_cpus);
		kvm->cfg.nrcpus = max_cpus;
	} else if (kvm->cfg.nrcpus > recommended_cpus) {
		printf("  # Warning: The maximum recommended number of VCPUs"
			" is %d\n", recommended_cpus);
	}

	kvm->nrcpus = kvm->cfg.nrcpus;

	/* Alloc one pointer too many, so array ends up 0-terminated */
	kvm->cpus = calloc(kvm->nrcpus + 1, sizeof(void *));
	if (!kvm->cpus) {
		pr_warning("Couldn't allocate array for %d CPUs", kvm->nrcpus);
		return -ENOMEM;
	}

	for (i = 0; i < kvm->nrcpus; i++) {
		kvm->cpus[i] = kvm_cpu__arch_init(kvm, i);
		if (!kvm->cpus[i]) {
			pr_warning("unable to initialize KVM VCPU");
			goto fail_alloc;
		}
	}

	return 0;

fail_alloc:
	for (i = 0; i < kvm->nrcpus; i++)
		free(kvm->cpus[i]);
	return -ENOMEM;
}
base_init(kvm_cpu__init);

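/*
 * Tear down all VCPUs: delete VCPU 0 directly, then signal and join the
 * remaining VCPU threads before releasing the cpus array.
 */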
int kvm_cpu__exit(struct kvm *kvm)
{
	int i, r = 0;
	void *ret = NULL;

	kvm_cpu__delete(kvm->cpus[0]);
	kvm->cpus[0] = NULL;

	for (i = 1; i < kvm->nrcpus; i++) {
		if (kvm->cpus[i]->is_running) {
			pthread_kill(kvm->cpus[i]->thread, SIGKVMEXIT);
			if (pthread_join(kvm->cpus[i]->thread, &ret) != 0)
				die("pthread_join");
			kvm_cpu__delete(kvm->cpus[i]);
		}
		if (ret == NULL)
			r = 0;
	}

	free(kvm->cpus);

	kvm->nrcpus = 0;

	return r;
}
late_exit(kvm_cpu__exit);