/* kvmtool: kvm-ipc.c (xref revision d30d94872e7fe998bd527e5cc6c38db58d46ee03) */
#include <sys/epoll.h>
#include <sys/un.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <dirent.h>

#include "kvm/epoll.h"
#include "kvm/kvm-ipc.h"
#include "kvm/rwsem.h"
#include "kvm/read-write.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/builtin-debug.h"
#include "kvm/strbuf.h"
#include "kvm/kvm-cpu.h"
#include "kvm/8250-serial.h"

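/*
 * Wire format (as implemented by kvm_ipc__send{,_msg}() and
 * kvm_ipc__receive() below): every message on the per-guest UNIX socket
 * starts with a kvm_ipc_head, followed by @len bytes of type-specific
 * payload.
 */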
struct kvm_ipc_head {
	u32 type;
	u32 len;
};

#define KVM_IPC_MAX_MSGS 16

#define KVM_SOCK_SUFFIX		".sock"
#define KVM_SOCK_SUFFIX_LEN	((ssize_t)sizeof(KVM_SOCK_SUFFIX) - 1)

extern __thread struct kvm_cpu *current_kvm_cpu;
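/*
 * Dispatch table of message handlers, indexed by message type and
 * protected by msgs_rwlock.
 */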
static void (*msgs[KVM_IPC_MAX_MSGS])(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg);
static DECLARE_RWSEM(msgs_rwlock);
static int server_fd;
static struct kvm__epoll epoll;

static int kvm__create_socket(struct kvm *kvm)
{
	char full_name[PATH_MAX];
	int s;
	struct sockaddr_un local;
	int len, r;

	/* This is usually 108 bytes long */
	BUILD_BUG_ON(sizeof(local.sun_path) < 32);

	snprintf(full_name, sizeof(full_name), "%s/%s%s",
		 kvm__get_dir(), kvm->cfg.guest_name, KVM_SOCK_SUFFIX);

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0) {
		perror("socket");
		return s;
	}

	local.sun_family = AF_UNIX;
	strlcpy(local.sun_path, full_name, sizeof(local.sun_path));
	len = strlen(local.sun_path) + sizeof(local.sun_family);
	r = bind(s, (struct sockaddr *)&local, len);
	/* Check for an existing socket file */
	if (r < 0 && errno == EADDRINUSE) {
		r = connect(s, (struct sockaddr *)&local, len);
		if (r == 0) {
			/*
			 * If we could connect, there is already a guest
			 * using this same name. This should not happen
			 * for PID derived names, but could happen for user
			 * provided guest names.
			 */
			pr_err("Guest socket file %s already exists.",
			       full_name);
			r = -EEXIST;
			goto fail;
		}
		if (errno == ECONNREFUSED) {
			/*
			 * This is a ghost socket file, with no-one listening
			 * on the other end. Since kvmtool will only bind
			 * above when creating a new guest, there is no
			 * danger in just removing the file and re-trying.
			 */
			unlink(full_name);
			pr_info("Removed ghost socket file \"%s\".", full_name);
			r = bind(s, (struct sockaddr *)&local, len);
		}
	}
	if (r < 0) {
		perror("bind");
		goto fail;
	}

	r = listen(s, 5);
	if (r < 0) {
		perror("listen");
		goto fail;
	}

	return s;

fail:
	close(s);
	return r;
}

void kvm__remove_socket(const char *name)
{
	char full_name[PATH_MAX];

	snprintf(full_name, sizeof(full_name), "%s/%s%s",
		 kvm__get_dir(), name, KVM_SOCK_SUFFIX);
	unlink(full_name);
}

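/*
 * Connect to the socket of the running instance called @name and return
 * the connected fd. A stale socket file (nothing listening on the other
 * end) is unlinked along the way.
 */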
int kvm__get_sock_by_instance(const char *name)
{
	int s, len, r;
	char sock_file[PATH_MAX];
	struct sockaddr_un local;

	snprintf(sock_file, sizeof(sock_file), "%s/%s%s",
		 kvm__get_dir(), name, KVM_SOCK_SUFFIX);
	s = socket(AF_UNIX, SOCK_STREAM, 0);

	local.sun_family = AF_UNIX;
	strlcpy(local.sun_path, sock_file, sizeof(local.sun_path));
	len = strlen(local.sun_path) + sizeof(local.sun_family);

	r = connect(s, (struct sockaddr *)&local, len);
	if (r < 0 && errno == ECONNREFUSED) {
		/* Clean up the ghost socket file */
		unlink(local.sun_path);
		pr_info("Removed ghost socket file \"%s\".", sock_file);
		return r;
	} else if (r < 0) {
		return r;
	}

	return s;
}

static bool is_socket(const char *base_path, const struct dirent *dent)
{
	switch (dent->d_type) {
	case DT_SOCK:
		return true;

	/* Some filesystems do not fill in d_type; fall back to stat() */
	case DT_UNKNOWN: {
		char path[PATH_MAX];
		struct stat st;

		sprintf(path, "%s/%s", base_path, dent->d_name);
		if (stat(path, &st))
			return false;

		return S_ISSOCK(st.st_mode);
	}
	default:
		return false;
	}
}

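/*
 * Scan kvm__get_dir() for "<name>.sock" files and invoke @callback with
 * each instance name and a connected socket fd. Enumeration stops early
 * if the callback returns a negative value.
 */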
int kvm__enumerate_instances(int (*callback)(const char *name, int fd))
{
	int sock;
	DIR *dir;
	struct dirent *entry;
	int ret = 0;
	const char *path;

	path = kvm__get_dir();

	dir = opendir(path);
	if (!dir)
		return -errno;

	for (;;) {
		entry = readdir(dir);
		if (!entry)
			break;
		if (is_socket(path, entry)) {
			ssize_t name_len = strlen(entry->d_name);
			char *p;

			if (name_len <= KVM_SOCK_SUFFIX_LEN)
				continue;

			p = &entry->d_name[name_len - KVM_SOCK_SUFFIX_LEN];
			if (memcmp(KVM_SOCK_SUFFIX, p, KVM_SOCK_SUFFIX_LEN))
				continue;

			*p = 0;
			sock = kvm__get_sock_by_instance(entry->d_name);
			if (sock < 0)
				continue;
			ret = callback(entry->d_name, sock);
			close(sock);
			if (ret < 0)
				break;
		}
	}

	closedir(dir);

	return ret;
}

int kvm_ipc__register_handler(u32 type, void (*cb)(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg))
{
	if (type >= KVM_IPC_MAX_MSGS)
		return -ENOSPC;

	down_write(&msgs_rwlock);
	msgs[type] = cb;
	up_write(&msgs_rwlock);

	return 0;
}
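
/*
 * A subsystem typically registers its handler at init time, e.g.
 *
 *	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
 *
 * (see kvm_ipc__init() below). A handler may reply by writing directly
 * to @fd, as kvm__pid() does.
 */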

int kvm_ipc__send(int fd, u32 type)
{
	struct kvm_ipc_head head = {.type = type, .len = 0,};

	if (write_in_full(fd, &head, sizeof(head)) < 0)
		return -1;

	return 0;
}

int kvm_ipc__send_msg(int fd, u32 type, u32 len, u8 *msg)
{
	struct kvm_ipc_head head = {.type = type, .len = len,};

	if (write_in_full(fd, &head, sizeof(head)) < 0)
		return -1;

	if (write_in_full(fd, msg, len) < 0)
		return -1;

	return 0;
}

static int kvm_ipc__handle(struct kvm *kvm, int fd, u32 type, u32 len, u8 *data)
{
	void (*cb)(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg);

	if (type >= KVM_IPC_MAX_MSGS)
		return -ENOSPC;

	down_read(&msgs_rwlock);
	cb = msgs[type];
	up_read(&msgs_rwlock);

	if (cb == NULL) {
		pr_warning("No device handles type %u\n", type);
		return -ENODEV;
	}

	cb(kvm, fd, type, len, data);

	return 0;
}

static int kvm_ipc__new_conn(int fd)
{
	int client;
	struct epoll_event ev;

	client = accept(fd, NULL, NULL);
	if (client < 0)
		return -1;

	ev.events = EPOLLIN | EPOLLRDHUP;
	ev.data.fd = client;
	if (epoll_ctl(epoll.fd, EPOLL_CTL_ADD, client, &ev) < 0) {
		close(client);
		return -1;
	}

	return client;
}

static void kvm_ipc__close_conn(int fd)
{
	epoll_ctl(epoll.fd, EPOLL_CTL_DEL, fd, NULL);
	close(fd);
}

static int kvm_ipc__receive(struct kvm *kvm, int fd)
{
	struct kvm_ipc_head head;
	u8 *msg = NULL;
	u32 n;

	n = read(fd, &head, sizeof(head));
	if (n != sizeof(head))
		goto done;

	msg = malloc(head.len);
	if (msg == NULL)
		goto done;

	n = read_in_full(fd, msg, head.len);
	if (n != head.len)
		goto done;

	kvm_ipc__handle(kvm, fd, head.type, head.len, msg);

	return 0;

done:
	free(msg);
	return -1;
}

static void kvm_ipc__handle_event(struct kvm *kvm, struct epoll_event *ev)
{
	int fd = ev->data.fd;

	if (fd == server_fd) {
		int client, r;

		client = kvm_ipc__new_conn(fd);
		/*
		 * Drain all IPC commands queued on the new connection,
		 * handling multiple commands at a time.
		 */
		do {
			r = kvm_ipc__receive(kvm, client);
		} while (r == 0);

	} else if (ev->events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
		kvm_ipc__close_conn(fd);
	} else {
		kvm_ipc__receive(kvm, fd);
	}
}

static void kvm__pid(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	pid_t pid = getpid();
	int r = 0;

	if (type == KVM_IPC_PID)
		r = write(fd, &pid, sizeof(pid));

	if (r < 0)
		pr_warning("Failed sending PID");
}

static void handle_stop(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	if (WARN_ON(type != KVM_IPC_STOP || len))
		return;

	kvm__reboot(kvm);
}

/* Pause/resume the guest using SIGUSR2 */
static int is_paused;

static void handle_pause(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	if (WARN_ON(len))
		return;

	if (type == KVM_IPC_RESUME && is_paused) {
		kvm->vm_state = KVM_VMSTATE_RUNNING;
		kvm__continue(kvm);
	} else if (type == KVM_IPC_PAUSE && !is_paused) {
		kvm->vm_state = KVM_VMSTATE_PAUSED;
		/*
		 * Mark the guest as paused by the host, so its soft
		 * lockup detector does not fire across the pause.
		 */
		ioctl(kvm->vm_fd, KVM_KVMCLOCK_CTRL);
		kvm__pause(kvm);
	} else {
		return;
	}

	is_paused = !is_paused;
}

static void handle_vmstate(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	int r = 0;

	if (type == KVM_IPC_VMSTATE)
		r = write(fd, &kvm->vm_state, sizeof(kvm->vm_state));

	if (r < 0)
		pr_warning("Failed sending VMSTATE");
}

/*
 * Serialize debug printout so that the output of multiple vcpus does not
 * get mixed up:
 */
static int printout_done;

static void handle_sigusr1(int sig)
{
	struct kvm_cpu *cpu = current_kvm_cpu;
	int fd = kvm_cpu__get_debug_fd();

	if (!cpu || cpu->needs_nmi)
		return;

	dprintf(fd, "\n #\n # vCPU #%ld's dump:\n #\n", cpu->cpu_id);
	kvm_cpu__show_registers(cpu);
	kvm_cpu__show_code(cpu);
	kvm_cpu__show_page_tables(cpu);
	fflush(stdout);
	printout_done = 1;
}

static void handle_debug(struct kvm *kvm, int fd, u32 type, u32 len, u8 *msg)
{
	int i;
	struct debug_cmd_params *params;
	u32 dbg_type;
	u32 vcpu;

	if (WARN_ON(type != KVM_IPC_DEBUG || len != sizeof(*params)))
		return;

	params = (void *)msg;
	dbg_type = params->dbg_type;
	vcpu = params->cpu;

	if (dbg_type & KVM_DEBUG_CMD_TYPE_SYSRQ)
		serial8250__inject_sysrq(kvm, params->sysrq);

	if (dbg_type & KVM_DEBUG_CMD_TYPE_NMI) {
		if ((int)vcpu >= kvm->nrcpus)
			return;

		kvm->cpus[vcpu]->needs_nmi = 1;
		pthread_kill(kvm->cpus[vcpu]->thread, SIGUSR1);
	}

	if (!(dbg_type & KVM_DEBUG_CMD_TYPE_DUMP))
		return;

	for (i = 0; i < kvm->nrcpus; i++) {
		struct kvm_cpu *cpu = kvm->cpus[i];

		if (!cpu)
			continue;

		printout_done = 0;

		kvm_cpu__set_debug_fd(fd);
		pthread_kill(cpu->thread, SIGUSR1);
		/*
		 * Wait for the vCPU to dump state before signalling
		 * the next thread. Since this is debug code it does
		 * not matter that we are burning CPU time a bit:
		 */
		while (!printout_done)
			sleep(0);
	}

	close(fd);

	serial8250__inject_sysrq(kvm, 'p');
}

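/*
 * Create the per-guest IPC socket, start the "kvm-ipc" epoll thread and
 * register the built-in message handlers plus the SIGUSR1 dump handler.
 */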
int kvm_ipc__init(struct kvm *kvm)
{
	int ret;
	int sock = kvm__create_socket(kvm);
	struct epoll_event ev = {0};

	server_fd = sock;

	ret = epoll__init(kvm, &epoll, "kvm-ipc",
			  kvm_ipc__handle_event);
	if (ret) {
		pr_err("Failed starting IPC thread");
		goto err;
	}

	ev.events = EPOLLIN | EPOLLET;
	ev.data.fd = sock;
	if (epoll_ctl(epoll.fd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		pr_err("Failed adding socket to epoll");
		ret = -EFAULT;
		goto err_epoll;
	}

	kvm_ipc__register_handler(KVM_IPC_PID, kvm__pid);
	kvm_ipc__register_handler(KVM_IPC_DEBUG, handle_debug);
	kvm_ipc__register_handler(KVM_IPC_PAUSE, handle_pause);
	kvm_ipc__register_handler(KVM_IPC_RESUME, handle_pause);
	kvm_ipc__register_handler(KVM_IPC_STOP, handle_stop);
	kvm_ipc__register_handler(KVM_IPC_VMSTATE, handle_vmstate);
	signal(SIGUSR1, handle_sigusr1);

	return 0;

err_epoll:
	epoll__exit(&epoll);
	close(server_fd);
err:
	return ret;
}
base_init(kvm_ipc__init);

int kvm_ipc__exit(struct kvm *kvm)
{
	epoll__exit(&epoll);
	close(server_fd);

	kvm__remove_socket(kvm->cfg.guest_name);

	return 0;
}
base_exit(kvm_ipc__exit);