xref: /kvmtool/kvm-ipc.c (revision 211370d69e62335e3345aae5c52e935b11307acf)
#include <sys/epoll.h>
#include <sys/un.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <dirent.h>

#include "kvm/kvm-ipc.h"
#include "kvm/rwsem.h"
#include "kvm/read-write.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/builtin-debug.h"
#include "kvm/strbuf.h"
#include "kvm/kvm-cpu.h"
#include "kvm/8250-serial.h"

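/*
 * Wire format used on the per-guest UNIX socket: every request starts with
 * this fixed header, followed by 'len' bytes of type-specific payload (see
 * kvm_ipc__send_msg() and kvm_ipc__receive() below).
 */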
struct kvm_ipc_head {
	u32 type;
	u32 len;
};

#define KVM_IPC_MAX_MSGS 16

#define KVM_SOCK_SUFFIX		".sock"
#define KVM_SOCK_SUFFIX_LEN	((ssize_t)sizeof(KVM_SOCK_SUFFIX) - 1)

extern __thread struct kvm_cpu *current_kvm_cpu;
static void (*msgs[KVM_IPC_MAX_MSGS])(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg);
static DECLARE_RWSEM(msgs_rwlock);
static int epoll_fd, server_fd, stop_fd;
static pthread_t thread;

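/*
 * Create the listening UNIX socket for this guest at
 * <kvm dir>/<guest name>.sock, refusing to start if a socket file with
 * that name already exists.
 */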
static int kvm__create_socket(struct kvm *kvm)
{
	char full_name[PATH_MAX];
	int s;
	struct sockaddr_un local;
	int len, r;

	/* sun_path is usually 108 bytes long */
	BUILD_BUG_ON(sizeof(local.sun_path) < 32);

	snprintf(full_name, sizeof(full_name), "%s/%s%s",
		 kvm__get_dir(), kvm->cfg.guest_name, KVM_SOCK_SUFFIX);
	if (access(full_name, F_OK) == 0) {
		pr_err("Socket file %s already exists", full_name);
		return -EEXIST;
	}

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0) {
		perror("socket");
		return s;
	}

	local.sun_family = AF_UNIX;
	strlcpy(local.sun_path, full_name, sizeof(local.sun_path));
	len = strlen(local.sun_path) + sizeof(local.sun_family);
	r = bind(s, (struct sockaddr *)&local, len);
	if (r < 0) {
		perror("bind");
		goto fail;
	}

	r = listen(s, 5);
	if (r < 0) {
		perror("listen");
		goto fail;
	}

	return s;

fail:
	close(s);
	return r;
}

void kvm__remove_socket(const char *name)
{
	char full_name[PATH_MAX];

	snprintf(full_name, sizeof(full_name), "%s/%s%s",
		 kvm__get_dir(), name, KVM_SOCK_SUFFIX);
	unlink(full_name);
}

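/*
 * Connect to the socket of a running instance by guest name. Callers are
 * expected to use the returned fd with kvm_ipc__send()/kvm_ipc__send_msg()
 * and close it when done. A minimal client sketch, assuming a guest named
 * "guest-1234" is running:
 *
 *	int fd = kvm__get_sock_by_instance("guest-1234");
 *	if (fd >= 0) {
 *		kvm_ipc__send(fd, KVM_IPC_PAUSE);
 *		close(fd);
 *	}
 */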
int kvm__get_sock_by_instance(const char *name)
{
	int s, len, r;
	char sock_file[PATH_MAX];
	struct sockaddr_un local;

	snprintf(sock_file, sizeof(sock_file), "%s/%s%s",
		 kvm__get_dir(), name, KVM_SOCK_SUFFIX);
	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0)
		return s;

	local.sun_family = AF_UNIX;
	strlcpy(local.sun_path, sock_file, sizeof(local.sun_path));
	len = strlen(local.sun_path) + sizeof(local.sun_family);

	r = connect(s, (struct sockaddr *)&local, len);
	if (r < 0 && errno == ECONNREFUSED) {
		/* Tell the user to clean up the ghost socket file */
		pr_err("\"%s\" could be a ghost socket file, please remove it",
				sock_file);
		close(s);
		return r;
	} else if (r < 0) {
		close(s);
		return r;
	}

	return s;
}

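/*
 * Walk the kvm socket directory and invoke 'callback' once per running
 * instance, passing the guest name (with the ".sock" suffix stripped) and
 * a connected socket fd. Enumeration stops early if the callback returns
 * a negative value.
 */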
int kvm__enumerate_instances(int (*callback)(const char *name, int fd))
{
	int sock;
	DIR *dir;
	struct dirent entry, *result;
	int ret = 0;

	dir = opendir(kvm__get_dir());
	if (!dir)
		return -errno;

	for (;;) {
		readdir_r(dir, &entry, &result);
		if (result == NULL)
			break;
		if (entry.d_type == DT_SOCK) {
			ssize_t name_len = strlen(entry.d_name);
			char *p;

			if (name_len <= KVM_SOCK_SUFFIX_LEN)
				continue;

			p = &entry.d_name[name_len - KVM_SOCK_SUFFIX_LEN];
			if (memcmp(KVM_SOCK_SUFFIX, p, KVM_SOCK_SUFFIX_LEN))
				continue;

			*p = 0;
			sock = kvm__get_sock_by_instance(entry.d_name);
			if (sock < 0)
				continue;
			ret = callback(entry.d_name, sock);
			close(sock);
			if (ret < 0)
				break;
		}
	}

	closedir(dir);

	return ret;
}

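/*
 * Register a callback for one message type. The handler table is indexed
 * by type and protected by msgs_rwlock; registering NULL effectively
 * unregisters a handler.
 */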
int kvm_ipc__register_handler(u32 type, void (*cb)(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg))
{
	if (type >= KVM_IPC_MAX_MSGS)
		return -ENOSPC;

	down_write(&msgs_rwlock);
	msgs[type] = cb;
	up_write(&msgs_rwlock);

	return 0;
}

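/*
 * Helpers for writing a request or reply to an IPC socket: either a
 * header-only message, or a header followed by a payload of 'len' bytes.
 */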
int kvm_ipc__send(int fd, u32 type)
{
	struct kvm_ipc_head head = {.type = type, .len = 0,};

	if (write_in_full(fd, &head, sizeof(head)) < 0)
		return -1;

	return 0;
}

int kvm_ipc__send_msg(int fd, u32 type, u32 len, u8 *msg)
{
	struct kvm_ipc_head head = {.type = type, .len = len,};

	if (write_in_full(fd, &head, sizeof(head)) < 0)
		return -1;

	if (write_in_full(fd, msg, len) < 0)
		return -1;

	return 0;
}

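/*
 * Dispatch one received message to the handler registered for its type,
 * if any.
 */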
static int kvm_ipc__handle(struct kvm *kvm, int fd, u32 type, u32 len, u8 *data)
{
	void (*cb)(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg);

	if (type >= KVM_IPC_MAX_MSGS)
		return -ENOSPC;

	down_read(&msgs_rwlock);
	cb = msgs[type];
	up_read(&msgs_rwlock);

	if (cb == NULL) {
		pr_warning("No device handles type %u\n", type);
		return -ENODEV;
	}

	cb(kvm, fd, type, len, data);

	return 0;
}

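/*
 * Accept a new client connection and watch it for input and hang-up
 * events on the shared epoll instance.
 */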
static int kvm_ipc__new_conn(int fd)
{
	int client;
	struct epoll_event ev;

	client = accept(fd, NULL, NULL);
	if (client < 0)
		return -1;

	ev.events = EPOLLIN | EPOLLRDHUP;
	ev.data.fd = client;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client, &ev) < 0) {
		close(client);
		return -1;
	}

	return client;
}

static void kvm_ipc__close_conn(int fd)
{
	epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, NULL);
	close(fd);
}

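/*
 * Read one message (header, then payload) from a client and hand it to
 * kvm_ipc__handle(). Returns 0 on success so the caller can keep reading
 * further messages from the same connection.
 */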
static int kvm_ipc__receive(struct kvm *kvm, int fd)
{
	struct kvm_ipc_head head;
	u8 *msg = NULL;
	u32 n;

	n = read(fd, &head, sizeof(head));
	if (n != sizeof(head))
		goto done;

	msg = malloc(head.len);
	if (msg == NULL)
		goto done;

	n = read_in_full(fd, msg, head.len);
	if (n != head.len)
		goto done;

	kvm_ipc__handle(kvm, fd, head.type, head.len, msg);
	free(msg);

	return 0;

done:
	free(msg);
	return -1;
}

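/*
 * IPC event loop: a write to the stop_fd eventfd terminates the thread,
 * events on server_fd accept new clients, and any other fd is either a
 * client request to service or a closed connection to clean up.
 */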
static void *kvm_ipc__thread(void *param)
{
	struct epoll_event event;
	struct kvm *kvm = param;

	kvm__set_thread_name("kvm-ipc");

	for (;;) {
		int nfds;

		nfds = epoll_wait(epoll_fd, &event, 1, -1);
		if (nfds > 0) {
			int fd = event.data.fd;

			if (fd == stop_fd && event.events & EPOLLIN) {
				break;
			} else if (fd == server_fd) {
				int client, r;

				client = kvm_ipc__new_conn(fd);
				/*
				 * Handle multiple IPC commands at a time
				 */
				do {
					r = kvm_ipc__receive(kvm, client);
				} while (r == 0);

			} else if (event.events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
				kvm_ipc__close_conn(fd);
			} else {
				kvm_ipc__receive(kvm, fd);
			}
		}
	}

	return NULL;
}

static void kvm__pid(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	pid_t pid = getpid();
	int r = 0;

	if (type == KVM_IPC_PID)
		r = write(fd, &pid, sizeof(pid));

	if (r < 0)
		pr_warning("Failed sending PID");
}

static void handle_stop(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	if (WARN_ON(type != KVM_IPC_STOP || len))
		return;

	kvm_cpu__reboot(kvm);
}

/* Pause/resume the guest in response to KVM_IPC_PAUSE/KVM_IPC_RESUME requests */
static int is_paused;

static void handle_pause(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	if (WARN_ON(len))
		return;

	if (type == KVM_IPC_RESUME && is_paused) {
		kvm->vm_state = KVM_VMSTATE_RUNNING;
		kvm__continue(kvm);
	} else if (type == KVM_IPC_PAUSE && !is_paused) {
		kvm->vm_state = KVM_VMSTATE_PAUSED;
		ioctl(kvm->vm_fd, KVM_KVMCLOCK_CTRL);
		kvm__pause(kvm);
	} else {
		return;
	}

	is_paused = !is_paused;
}

static void handle_vmstate(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	int r = 0;

	if (type == KVM_IPC_VMSTATE)
		r = write(fd, &kvm->vm_state, sizeof(kvm->vm_state));

	if (r < 0)
		pr_warning("Failed sending VMSTATE");
}

/*
 * Serialize debug printout so that the output of multiple vcpus does not
 * get mixed up:
 */
static int printout_done;

static void handle_sigusr1(int sig)
{
	struct kvm_cpu *cpu = current_kvm_cpu;
	int fd = kvm_cpu__get_debug_fd();

	if (!cpu || cpu->needs_nmi)
		return;

	dprintf(fd, "\n #\n # vCPU #%ld's dump:\n #\n", cpu->cpu_id);
	kvm_cpu__show_registers(cpu);
	kvm_cpu__show_code(cpu);
	kvm_cpu__show_page_tables(cpu);
	fflush(stdout);
	printout_done = 1;
}

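/*
 * KVM_IPC_DEBUG carries a struct debug_cmd_params whose dbg_type bitmask
 * selects a sysrq injection, an NMI for a single vCPU, and/or a full
 * register/code/page-table dump of every vCPU via SIGUSR1.
 */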
static void handle_debug(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	int i;
	struct debug_cmd_params *params;
	u32 dbg_type;
	u32 vcpu;

	if (WARN_ON(type != KVM_IPC_DEBUG || len != sizeof(*params)))
		return;

	params = (void *)msg;
	dbg_type = params->dbg_type;
	vcpu = params->cpu;

	if (dbg_type & KVM_DEBUG_CMD_TYPE_SYSRQ)
		serial8250__inject_sysrq(kvm, params->sysrq);

	if (dbg_type & KVM_DEBUG_CMD_TYPE_NMI) {
		if ((int)vcpu >= kvm->nrcpus)
			return;

		kvm->cpus[vcpu]->needs_nmi = 1;
		pthread_kill(kvm->cpus[vcpu]->thread, SIGUSR1);
	}

	if (!(dbg_type & KVM_DEBUG_CMD_TYPE_DUMP))
		return;

	for (i = 0; i < kvm->nrcpus; i++) {
		struct kvm_cpu *cpu = kvm->cpus[i];

		if (!cpu)
			continue;

		printout_done = 0;

		kvm_cpu__set_debug_fd(fd);
		pthread_kill(cpu->thread, SIGUSR1);
		/*
		 * Wait for the vCPU to dump state before signalling
		 * the next thread. Since this is debug code it does
		 * not matter that we are burning CPU time a bit:
		 */
		while (!printout_done)
			sleep(0);
	}

	close(fd);

	serial8250__inject_sysrq(kvm, 'p');
}

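/*
 * Set up the IPC machinery: create the listening socket, the epoll
 * instance and the stop eventfd, start the IPC thread, and register the
 * built-in message handlers.
 */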
int kvm_ipc__init(struct kvm *kvm)
{
	int ret;
	int sock = kvm__create_socket(kvm);
	struct epoll_event ev = {0};

	if (sock < 0)
		return sock;

	server_fd = sock;

	epoll_fd = epoll_create(KVM_IPC_MAX_MSGS);
	if (epoll_fd < 0) {
		perror("epoll_create");
		ret = epoll_fd;
		goto err;
	}

	ev.events = EPOLLIN | EPOLLET;
	ev.data.fd = sock;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		pr_err("Failed adding socket to epoll");
		ret = -EFAULT;
		goto err_epoll;
	}

	stop_fd = eventfd(0, 0);
	if (stop_fd < 0) {
		perror("eventfd");
		ret = stop_fd;
		goto err_epoll;
	}

	ev.events = EPOLLIN | EPOLLET;
	ev.data.fd = stop_fd;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, stop_fd, &ev) < 0) {
		pr_err("Failed adding stop event to epoll");
		ret = -EFAULT;
		goto err_stop;
	}

	if (pthread_create(&thread, NULL, kvm_ipc__thread, kvm) != 0) {
		pr_err("Failed starting IPC thread");
		ret = -EFAULT;
		goto err_stop;
	}

	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
	kvm_ipc__register_handler(KVM_IPC_DEBUG, handle_debug);
	kvm_ipc__register_handler(KVM_IPC_PAUSE, handle_pause);
	kvm_ipc__register_handler(KVM_IPC_RESUME, handle_pause);
	kvm_ipc__register_handler(KVM_IPC_STOP, handle_stop);
	kvm_ipc__register_handler(KVM_IPC_VMSTATE, handle_vmstate);
	signal(SIGUSR1, handle_sigusr1);

	return 0;

err_stop:
	close(stop_fd);
err_epoll:
	close(epoll_fd);
err:
	return ret;
}
base_init(kvm_ipc__init);

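/*
 * Tear down IPC: signal the IPC thread to exit via the stop eventfd, close
 * the server and epoll fds, and remove the socket file.
 */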
int kvm_ipc__exit(struct kvm *kvm)
{
	u64 val = 1;
	int ret;

	ret = write(stop_fd, &val, sizeof(val));
	if (ret < 0)
		return ret;

	close(server_fd);
	close(epoll_fd);

	kvm__remove_socket(kvm->cfg.guest_name);

	return ret;
}
base_exit(kvm_ipc__exit);