xref: /kvmtool/virtio/net.c (revision b231683c336132441496060a81726305ca56b11b)
#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/irq.h"
#include "kvm/uip.h"
#include "kvm/guest_compat.h"
#include "kvm/iovec.h"
#include "kvm/strbuf.h"

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/if_tun.h>
#include <linux/types.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/eventfd.h>

#define VIRTIO_NET_QUEUE_SIZE		256
#define VIRTIO_NET_NUM_QUEUES		8
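
/*
 * Virtqueues come in rx/tx pairs: queue 2n is the nth receive queue and
 * queue 2n + 1 the nth transmit queue. One extra control queue follows
 * the data queues, hence the "* 2 + 1" sizing of queues[] below.
 */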

struct net_dev;

struct net_dev_operations {
	int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
	int (*tx)(struct iovec *iov, u16 out, struct net_dev *ndev);
};

struct net_dev_queue {
	int				id;
	struct net_dev			*ndev;
	struct virt_queue		vq;
	pthread_t			thread;
	struct mutex			lock;
	pthread_cond_t			cond;
	int				gsi;
	int				irqfd;
};

struct net_dev {
	struct mutex			mutex;
	struct virtio_device		vdev;
	struct list_head		list;

	struct net_dev_queue		queues[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	struct virtio_net_config	config;
	u32				queue_pairs;

	int				vhost_fd;
	int				tap_fd;
	char				tap_name[IFNAMSIZ];
	bool				tap_ufo;

	int				mode;

	struct uip_info			info;
	struct net_dev_operations	*ops;
	struct kvm			*kvm;

	struct virtio_net_params	*params;
};

static LIST_HEAD(ndevs);
static int compat_id = -1;

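/*
 * Largest packet the rx bounce buffer has to hold: presumably a 64 KiB
 * (65536 byte) offloaded payload plus the 14 byte Ethernet header.
 */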
#define MAX_PACKET_SIZE 65550

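/*
 * Note: only device feature bits 0-31 can be tested this way, which
 * covers every feature bit this device offers.
 */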
static bool has_virtio_feature(struct net_dev *ndev, u32 feature)
{
	return ndev->vdev.features & (1UL << feature);
}

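/*
 * Each packet exchanged with the guest is prefixed by a vnet header.
 * Legacy devices that did not negotiate VIRTIO_NET_F_MRG_RXBUF use the
 * short struct virtio_net_hdr; mergeable rx buffers and modern
 * (non-legacy) devices use struct virtio_net_hdr_mrg_rxbuf, which adds
 * the num_buffers field.
 */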
static int virtio_net_hdr_len(struct net_dev *ndev)
{
	if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF) ||
	    !ndev->vdev.legacy)
		return sizeof(struct virtio_net_hdr_mrg_rxbuf);

	return sizeof(struct virtio_net_hdr);
}

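/*
 * The rx worker reads one complete packet from the backend into a bounce
 * buffer, then scatters it over as many descriptor chains as needed,
 * marking each chain used without publishing it. The used index is only
 * advanced (and num_buffers filled in) once the whole packet is in place,
 * so the guest never observes a partially written packet.
 */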
static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct net_dev_queue *queue = p;
	struct virt_queue *vq = &queue->vq;
	struct net_dev *ndev = queue->ndev;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len, copied;

	kvm__set_thread_name("virtio-net-rx");

	kvm = ndev->kvm;
	while (1) {
		mutex_lock(&queue->lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&queue->cond, &queue->lock.mutex);
		mutex_unlock(&queue->lock);

		while (virt_queue__available(vq)) {
			unsigned char buffer[MAX_PACKET_SIZE + sizeof(struct virtio_net_hdr_mrg_rxbuf)];
			struct iovec dummy_iov = {
				.iov_base = buffer,
				.iov_len  = sizeof(buffer),
			};
			struct virtio_net_hdr_mrg_rxbuf *hdr;
			u16 num_buffers;

			len = ndev->ops->rx(&dummy_iov, 1, ndev);
			if (len < 0) {
				pr_warning("%s: rx on vq %u failed (%d), exiting thread",
						__func__, queue->id, len);
				goto out_err;
			}

			copied = num_buffers = 0;
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			hdr = iov[0].iov_base;
			while (copied < len) {
				size_t iovsize = min_t(size_t, len - copied, iov_size(iov, in));

				memcpy_toiovec(iov, buffer + copied, iovsize);
				copied += iovsize;
				virt_queue__set_used_elem_no_update(vq, head, iovsize, num_buffers++);
				if (copied == len)
					break;
				while (!virt_queue__available(vq))
					sleep(0);
				head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			}

			/*
			 * The device MUST set num_buffers, except in the case
			 * where the legacy driver did not negotiate
			 * VIRTIO_NET_F_MRG_RXBUF and the field does not exist.
			 */
			if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF) ||
			    !ndev->vdev.legacy)
				hdr->num_buffers = virtio_host_to_guest_u16(vq, num_buffers);

			virt_queue__used_idx_advance(vq, num_buffers);

			/* We should interrupt the guest right now, otherwise latency is huge. */
			if (virtio_queue__should_signal(vq))
				ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, queue->id);
		}
	}

out_err:
	pthread_exit(NULL);
	return NULL;
}

static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct net_dev_queue *queue = p;
	struct virt_queue *vq = &queue->vq;
	struct net_dev *ndev = queue->ndev;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm__set_thread_name("virtio-net-tx");

	kvm = ndev->kvm;

	while (1) {
		mutex_lock(&queue->lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&queue->cond, &queue->lock.mutex);
		mutex_unlock(&queue->lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len = ndev->ops->tx(iov, out, ndev);
			if (len < 0) {
				pr_warning("%s: tx on vq %u failed (%d)",
						__func__, queue->id, errno);
				goto out_err;
			}

			virt_queue__set_used_elem(vq, head, len);
		}

		if (virtio_queue__should_signal(vq))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, queue->id);
	}

out_err:
	pthread_exit(NULL);
	return NULL;
}

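/*
 * Handle the VIRTIO_NET_CTRL_MQ class of control commands. Worker threads
 * for every queue are already started from init_vq(), so presumably there
 * is nothing to reconfigure here; the command is simply acknowledged.
 */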
static virtio_net_ctrl_ack virtio_net_handle_mq(struct kvm *kvm, struct net_dev *ndev,
						struct virtio_net_ctrl_hdr *ctrl)
{
	/* Not much to do here */
	return VIRTIO_NET_OK;
}

static void *virtio_net_ctrl_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct net_dev_queue *queue = p;
	struct virt_queue *vq = &queue->vq;
	struct net_dev *ndev = queue->ndev;
	u16 out, in, head;
	struct kvm *kvm = ndev->kvm;
	struct virtio_net_ctrl_hdr *ctrl;
	virtio_net_ctrl_ack *ack;

	kvm__set_thread_name("virtio-net-ctrl");

	while (1) {
		mutex_lock(&queue->lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&queue->cond, &queue->lock.mutex);
		mutex_unlock(&queue->lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			ctrl = iov[0].iov_base;
			ack = iov[out].iov_base;

			switch (ctrl->class) {
			case VIRTIO_NET_CTRL_MQ:
				*ack = virtio_net_handle_mq(kvm, ndev, ctrl);
				break;
			default:
				*ack = VIRTIO_NET_ERR;
				break;
			}
			virt_queue__set_used_elem(vq, head, iov[out].iov_len);
		}

		if (virtio_queue__should_signal(vq))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, queue->id);
	}

	pthread_exit(NULL);
	return NULL;
}

static void virtio_net_handle_callback(struct kvm *kvm, struct net_dev *ndev, int queue)
{
	struct net_dev_queue *net_queue;

	if ((u32)queue >= (ndev->queue_pairs * 2 + 1)) {
		pr_warning("Unknown queue index %u", queue);
		return;
	}

	net_queue = &ndev->queues[queue];
	mutex_lock(&net_queue->lock);
	pthread_cond_signal(&net_queue->cond);
	mutex_unlock(&net_queue->lock);
}

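/*
 * IFF_NO_PI stops the kernel from prepending its own packet-information
 * header, and IFF_VNET_HDR makes the tap prepend/consume a vnet header
 * instead, so frames can be moved to and from the guest unmodified.
 */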
static int virtio_net_request_tap(struct net_dev *ndev, struct ifreq *ifr,
				  const char *tapname)
{
	int ret;

	memset(ifr, 0, sizeof(*ifr));
	ifr->ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (tapname)
		strlcpy(ifr->ifr_name, tapname, sizeof(ifr->ifr_name));

	ret = ioctl(ndev->tap_fd, TUNSETIFF, ifr);

	if (ret >= 0)
		strlcpy(ndev->tap_name, ifr->ifr_name, sizeof(ndev->tap_name));
	return ret;
}

static int virtio_net_exec_script(const char *script, const char *tap_name)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == 0) {
		execl(script, script, tap_name, (char *)NULL);
		_exit(1);
	} else {
		waitpid(pid, &status, 0);
		if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
			pr_warning("Failed to set up tap device with %s", script);
			return -1;
		}
	}
	return 0;
}

static bool virtio_net__tap_init(struct net_dev *ndev)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;
	const struct virtio_net_params *params = ndev->params;
	bool skipconf = !!params->tapif;

	hdr_len = virtio_net_hdr_len(ndev);
	if (ioctl(ndev->tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0)
		pr_warning("Config tap device TUNSETVNETHDRSZ error");

	if (strcmp(params->script, "none")) {
		if (virtio_net_exec_script(params->script, ndev->tap_name) < 0)
			goto fail;
	} else if (!skipconf) {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ifr.ifr_name));
		sin.sin_addr.s_addr = inet_addr(params->host_ip);
		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
		ifr.ifr_addr.sa_family = AF_INET;
		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
			pr_warning("Could not set ip address on tap device");
			goto fail;
		}
	}

	if (!skipconf) {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ifr.ifr_name));
		ioctl(sock, SIOCGIFFLAGS, &ifr);
		ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
		if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
			pr_warning("Could not bring tap device up");
	}

	close(sock);

	return true;

fail:
	if (sock >= 0)
		close(sock);
	if (ndev->tap_fd >= 0)
		close(ndev->tap_fd);

	return false;
}

static void virtio_net__tap_exit(struct net_dev *ndev)
{
	int sock;
	struct ifreq ifr;

	if (ndev->params->tapif)
		return;

	sock = socket(AF_INET, SOCK_STREAM, 0);
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ifr.ifr_name));
	ioctl(sock, SIOCGIFFLAGS, &ifr);
	ifr.ifr_flags &= ~(IFF_UP | IFF_RUNNING);
	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
		pr_warning("Could not bring tap device down");
	close(sock);
}

static bool virtio_net__tap_create(struct net_dev *ndev)
{
	int offload;
	struct ifreq ifr;
	const struct virtio_net_params *params = ndev->params;
	bool macvtap = (!!params->tapif) && (params->tapif[0] == '/');

	/* Did the user already give us the FD? */
	if (params->fd)
		ndev->tap_fd = params->fd;
	else {
		const char *tap_file = "/dev/net/tun";

		/* Did the user ask us to use macvtap? */
		if (macvtap)
			tap_file = params->tapif;

		ndev->tap_fd = open(tap_file, O_RDWR);
		if (ndev->tap_fd < 0) {
			pr_warning("Unable to open %s", tap_file);
			return false;
		}
	}

	if (!macvtap &&
	    virtio_net_request_tap(ndev, &ifr, params->tapif) < 0) {
		pr_warning("Config tap device error. Are you root?");
		goto fail;
	}

	/*
	 * UFO support was removed from the kernel in commit:
	 * ID: fb652fdfe83710da0ca13448a41b7ed027d0a984
	 * https://www.spinics.net/lists/netdev/msg443562.html
	 * In order to support older kernels without this commit, we
	 * include TUN_F_UFO in the offload flags by default and use the
	 * result to probe for kernel UFO support.
	 */
	ndev->tap_ufo = true;
	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
	if (ioctl(ndev->tap_fd, TUNSETOFFLOAD, offload) < 0) {
		/*
		 * Was this failure caused by the kernel removing UFO
		 * support? Retry TUNSETOFFLOAD without TUN_F_UFO.
		 */
		offload &= ~TUN_F_UFO;
		if (ioctl(ndev->tap_fd, TUNSETOFFLOAD, offload) < 0) {
			pr_warning("Config tap device TUNSETOFFLOAD error");
			goto fail;
		}
		ndev->tap_ufo = false;
	}

	return true;

fail:
	/* Don't close an fd that was handed to us by the user. */
	if (!params->fd && ndev->tap_fd >= 0)
		close(ndev->tap_fd);

	return false;
}

static inline int tap_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return writev(ndev->tap_fd, iov, out);
}

static inline int tap_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return readv(ndev->tap_fd, iov, in);
}

static inline int uip_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return uip_tx(iov, out, &ndev->info);
}

static inline int uip_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return uip_rx(iov, in, &ndev->info);
}

static struct net_dev_operations tap_ops = {
	.rx	= tap_ops_rx,
	.tx	= tap_ops_tx,
};

static struct net_dev_operations uip_ops = {
	.rx	= uip_ops_rx,
	.tx	= uip_ops_tx,
};

static u8 *get_config(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return ((u8 *)(&ndev->config));
}

static size_t get_config_size(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return sizeof(ndev->config);
}

static u32 get_host_features(struct kvm *kvm, void *dev)
{
	u32 features;
	struct net_dev *ndev = dev;

	features = 1UL << VIRTIO_NET_F_MAC
		| 1UL << VIRTIO_NET_F_CSUM
		| 1UL << VIRTIO_NET_F_HOST_TSO4
		| 1UL << VIRTIO_NET_F_HOST_TSO6
		| 1UL << VIRTIO_NET_F_GUEST_TSO4
		| 1UL << VIRTIO_NET_F_GUEST_TSO6
		| 1UL << VIRTIO_RING_F_EVENT_IDX
		| 1UL << VIRTIO_RING_F_INDIRECT_DESC
		| 1UL << VIRTIO_NET_F_CTRL_VQ
		| 1UL << VIRTIO_NET_F_MRG_RXBUF;

	if (ndev->queue_pairs > 1)
		features |= 1UL << VIRTIO_NET_F_MQ;

	/*
	 * The host and guest UFO features can only be enabled when the
	 * kernel has TAP UFO support.
	 */
	if (ndev->tap_ufo)
		features |= (1UL << VIRTIO_NET_F_HOST_UFO
				| 1UL << VIRTIO_NET_F_GUEST_UFO);

	return features;
}

static int virtio_net__vhost_set_features(struct net_dev *ndev)
{
	u64 features = 1UL << VIRTIO_RING_F_EVENT_IDX;
	u64 vhost_features;

	if (ioctl(ndev->vhost_fd, VHOST_GET_FEATURES, &vhost_features) != 0)
		die_perror("VHOST_GET_FEATURES failed");

	/* Make sure both sides support mergeable rx buffers. */
	if (vhost_features & 1UL << VIRTIO_NET_F_MRG_RXBUF &&
			has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
		features |= 1UL << VIRTIO_NET_F_MRG_RXBUF;

	return ioctl(ndev->vhost_fd, VHOST_SET_FEATURES, &features);
}

static void virtio_net_start(struct net_dev *ndev)
{
	if (ndev->mode == NET_MODE_TAP) {
		if (!virtio_net__tap_init(ndev))
			die_perror("TAP device initialization failed because");

		if (ndev->vhost_fd &&
				virtio_net__vhost_set_features(ndev) != 0)
			die_perror("VHOST_SET_FEATURES failed");
	} else {
		ndev->info.vnet_hdr_len = virtio_net_hdr_len(ndev);
		uip_init(&ndev->info);
	}
}

static void virtio_net_stop(struct net_dev *ndev)
{
	/* Undo whatever start() did */
	if (ndev->mode == NET_MODE_TAP)
		virtio_net__tap_exit(ndev);
	else
		uip_exit(&ndev->info);
}

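/*
 * For a cross-endian guest the vnet headers exchanged with the tap must
 * use the guest's byte order, not the host's: TUNSETVNETLE/TUNSETVNETBE
 * tell the kernel which one to use.
 */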
static void virtio_net_update_endian(struct net_dev *ndev)
{
	struct virtio_net_config *conf = &ndev->config;

	conf->status = virtio_host_to_guest_u16(&ndev->vdev,
						VIRTIO_NET_S_LINK_UP);
	conf->max_virtqueue_pairs = virtio_host_to_guest_u16(&ndev->vdev,
							     ndev->queue_pairs);

	/* Let TAP know about vnet header endianness */
	if (ndev->mode == NET_MODE_TAP &&
	    ndev->vdev.endian != VIRTIO_ENDIAN_HOST) {
		int enable_val = 1, disable_val = 0;
		int enable_req, disable_req;

		if (ndev->vdev.endian == VIRTIO_ENDIAN_LE) {
			enable_req = TUNSETVNETLE;
			disable_req = TUNSETVNETBE;
		} else {
			enable_req = TUNSETVNETBE;
			disable_req = TUNSETVNETLE;
		}

		ioctl(ndev->tap_fd, disable_req, &disable_val);
		if (ioctl(ndev->tap_fd, enable_req, &enable_val) < 0)
			pr_err("Config tap device TUNSETVNETLE/BE error");
	}
}

static void notify_status(struct kvm *kvm, void *dev, u32 status)
{
	struct net_dev *ndev = dev;

	if (status & VIRTIO__STATUS_CONFIG)
		virtio_net_update_endian(ndev);

	if (status & VIRTIO__STATUS_START)
		virtio_net_start(dev);
	else if (status & VIRTIO__STATUS_STOP)
		virtio_net_stop(dev);
}

static bool is_ctrl_vq(struct net_dev *ndev, u32 vq)
{
	return vq == (u32)(ndev->queue_pairs * 2);
}

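/*
 * Queue layout: even indices are rx queues, odd indices are tx queues and
 * index queue_pairs * 2 is the control queue. Without vhost, each data
 * queue gets its own worker thread; with vhost, the ring addresses are
 * handed to the kernel instead and only the control queue is serviced in
 * userspace.
 */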
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct vhost_vring_state state = { .index = vq };
	struct net_dev_queue *net_queue;
	struct vhost_vring_addr addr;
	struct net_dev *ndev = dev;
	struct virt_queue *queue;
	int r;

	compat__remove_message(compat_id);

	net_queue	= &ndev->queues[vq];
	net_queue->id	= vq;
	net_queue->ndev	= ndev;
	queue		= &net_queue->vq;
	virtio_init_device_vq(kvm, &ndev->vdev, queue, VIRTIO_NET_QUEUE_SIZE);

	mutex_init(&net_queue->lock);
	pthread_cond_init(&net_queue->cond, NULL);
	if (is_ctrl_vq(ndev, vq)) {
		pthread_create(&net_queue->thread, NULL, virtio_net_ctrl_thread,
			       net_queue);

		return 0;
	} else if (ndev->vhost_fd == 0) {
		if (vq & 1)
			pthread_create(&net_queue->thread, NULL,
				       virtio_net_tx_thread, net_queue);
		else
			pthread_create(&net_queue->thread, NULL,
				       virtio_net_rx_thread, net_queue);

		return 0;
	}

	if (queue->endian != VIRTIO_ENDIAN_HOST)
		die("VHOST requires the same endianness in guest and host");

	state.num = queue->vring.num;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_NUM, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_NUM failed");
	state.num = 0;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_BASE, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_BASE failed");

	addr = (struct vhost_vring_addr) {
		.index = vq,
		.desc_user_addr = (u64)(unsigned long)queue->vring.desc,
		.avail_user_addr = (u64)(unsigned long)queue->vring.avail,
		.used_user_addr = (u64)(unsigned long)queue->vring.used,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_ADDR, &addr);
	if (r < 0)
		die_perror("VHOST_SET_VRING_ADDR failed");

	return 0;
}

static void exit_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;
	struct net_dev_queue *queue = &ndev->queues[vq];

	if (!is_ctrl_vq(ndev, vq) && queue->gsi) {
		irq__del_irqfd(kvm, queue->gsi, queue->irqfd);
		close(queue->irqfd);
		queue->gsi = queue->irqfd = 0;
	}

	/*
	 * TODO: vhost reset owner. It's the only way to cleanly stop vhost, but
	 * we can't restart it at the moment.
	 */
	if (ndev->vhost_fd && !is_ctrl_vq(ndev, vq)) {
		pr_warning("Cannot reset VHOST queue");
		ioctl(ndev->vhost_fd, VHOST_RESET_OWNER);
		return;
	}

	/*
	 * Threads are waiting on cancellation points (readv or
	 * pthread_cond_wait) and should stop gracefully.
	 */
	pthread_cancel(queue->thread);
	pthread_join(queue->thread, NULL);
}

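/*
 * Wire a queue up for vhost: the "call" eventfd is registered as a KVM
 * irqfd so the kernel can inject the guest interrupt directly, and the
 * tap fd becomes the data-path backend. Together with the "kick" eventfd
 * set below in notify_vq_eventfd(), packets then flow without ever
 * bouncing through userspace.
 */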
static void notify_vq_gsi(struct kvm *kvm, void *dev, u32 vq, u32 gsi)
{
	struct net_dev *ndev = dev;
	struct net_dev_queue *queue = &ndev->queues[vq];
	struct vhost_vring_file file;
	int r;

	if (ndev->vhost_fd == 0)
		return;

	file = (struct vhost_vring_file) {
		.index	= vq,
		.fd	= eventfd(0, 0),
	};
	if (file.fd < 0)
		die_perror("eventfd failed");

	r = irq__add_irqfd(kvm, gsi, file.fd, -1);
	if (r < 0)
		die_perror("KVM_IRQFD failed");

	queue->irqfd = file.fd;
	queue->gsi = gsi;

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_CALL, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_CALL failed");
	file.fd = ndev->tap_fd;
	r = ioctl(ndev->vhost_fd, VHOST_NET_SET_BACKEND, &file);
	if (r != 0)
		die("VHOST_NET_SET_BACKEND failed %d", errno);
}

static void notify_vq_eventfd(struct kvm *kvm, void *dev, u32 vq, u32 efd)
{
	struct net_dev *ndev = dev;
	struct vhost_vring_file file = {
		.index	= vq,
		.fd	= efd,
	};
	int r;

	if (ndev->vhost_fd == 0 || is_ctrl_vq(ndev, vq))
		return;

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_KICK, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_KICK failed");
}

static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	virtio_net_handle_callback(kvm, ndev, vq);

	return 0;
}

static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	return &ndev->queues[vq].vq;
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	/* FIXME: dynamic */
	return VIRTIO_NET_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static unsigned int get_vq_count(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return ndev->queue_pairs * 2 + 1;
}

static struct virtio_ops net_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.get_vq_count		= get_vq_count,
	.init_vq		= init_vq,
	.exit_vq		= exit_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.notify_vq		= notify_vq,
	.notify_vq_gsi		= notify_vq_gsi,
	.notify_vq_eventfd	= notify_vq_eventfd,
	.notify_status		= notify_status,
};

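/*
 * vhost performs its own guest-physical to host-virtual translation, so
 * it needs a copy of the VM memory map: one vhost_memory_region per KVM
 * memory bank, mapping guest_phys_addr to our userspace address.
 */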
static void virtio_net__vhost_init(struct kvm *kvm, struct net_dev *ndev)
{
	struct kvm_mem_bank *bank;
	struct vhost_memory *mem;
	int r, i;

	ndev->vhost_fd = open("/dev/vhost-net", O_RDWR);
	if (ndev->vhost_fd < 0)
		die_perror("Failed opening vhost-net device");

	mem = calloc(1, sizeof(*mem) + kvm->mem_slots * sizeof(struct vhost_memory_region));
	if (mem == NULL)
		die("Failed allocating memory for vhost memory map");

	i = 0;
	list_for_each_entry(bank, &kvm->mem_banks, list) {
		mem->regions[i] = (struct vhost_memory_region) {
			.guest_phys_addr = bank->guest_phys_addr,
			.memory_size	 = bank->size,
			.userspace_addr	 = (unsigned long)bank->host_addr,
		};
		i++;
	}
	mem->nregions = i;

	r = ioctl(ndev->vhost_fd, VHOST_SET_OWNER);
	if (r != 0)
		die_perror("VHOST_SET_OWNER failed");

	r = ioctl(ndev->vhost_fd, VHOST_SET_MEM_TABLE, mem);
	if (r != 0)
		die_perror("VHOST_SET_MEM_TABLE failed");

	ndev->vdev.use_vhost = true;

	free(mem);
}

static inline void str_to_mac(const char *str, char *mac)
{
	sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		mac, mac+1, mac+2, mac+3, mac+4, mac+5);
}

static int set_net_param(struct kvm *kvm, struct virtio_net_params *p,
			const char *param, const char *val)
{
	if (strcmp(param, "guest_mac") == 0) {
		str_to_mac(val, p->guest_mac);
	} else if (strcmp(param, "mode") == 0) {
		if (!strncmp(val, "user", 4)) {
			int i;

			for (i = 0; i < kvm->cfg.num_net_devices; i++)
				if (kvm->cfg.net_params[i].mode == NET_MODE_USER)
					die("Only one usermode network device allowed at a time");
			p->mode = NET_MODE_USER;
		} else if (!strncmp(val, "tap", 3)) {
			p->mode = NET_MODE_TAP;
		} else if (!strncmp(val, "none", 4)) {
			kvm->cfg.no_net = 1;
			return -1;
		} else
			die("Unknown network mode %s, please use user, tap or none", val);
	} else if (strcmp(param, "script") == 0) {
		p->script = strdup(val);
	} else if (strcmp(param, "downscript") == 0) {
		p->downscript = strdup(val);
	} else if (strcmp(param, "guest_ip") == 0) {
		p->guest_ip = strdup(val);
	} else if (strcmp(param, "host_ip") == 0) {
		p->host_ip = strdup(val);
	} else if (strcmp(param, "trans") == 0) {
		p->trans = strdup(val);
	} else if (strcmp(param, "tapif") == 0) {
		p->tapif = strdup(val);
	} else if (strcmp(param, "vhost") == 0) {
		p->vhost = atoi(val);
	} else if (strcmp(param, "fd") == 0) {
		p->fd = atoi(val);
	} else if (strcmp(param, "mq") == 0) {
		p->mq = atoi(val);
	} else
		die("Unknown network parameter %s", param);

	return 0;
}

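/*
 * Parse a single --network option: comma-separated key=value pairs, e.g.
 * "mode=tap,tapif=tap0,vhost=1" or "mode=user,guest_mac=02:15:15:15:15:15".
 * Keys that are not given keep the defaults set up below.
 */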
int netdev_parser(const struct option *opt, const char *arg, int unset)
{
	struct virtio_net_params p;
	char *buf = NULL, *cmd = NULL, *cur = NULL;
	bool on_cmd = true;
	struct kvm *kvm = opt->ptr;

	if (arg) {
		buf = strdup(arg);
		if (buf == NULL)
			die("Failed allocating new net buffer");
		cur = strtok(buf, ",=");
	}

	p = (struct virtio_net_params) {
		.guest_ip	= DEFAULT_GUEST_ADDR,
		.host_ip	= DEFAULT_HOST_ADDR,
		.script		= DEFAULT_SCRIPT,
		.downscript	= DEFAULT_SCRIPT,
		.mode		= NET_MODE_TAP,
	};

	str_to_mac(DEFAULT_GUEST_MAC, p.guest_mac);
	p.guest_mac[5] += kvm->cfg.num_net_devices;

	while (cur) {
		if (on_cmd) {
			cmd = cur;
		} else {
			if (set_net_param(kvm, &p, cmd, cur) < 0)
				goto done;
		}
		on_cmd = !on_cmd;

		cur = strtok(NULL, ",=");
	}

	kvm->cfg.num_net_devices++;

	kvm->cfg.net_params = realloc(kvm->cfg.net_params, kvm->cfg.num_net_devices * sizeof(*kvm->cfg.net_params));
	if (kvm->cfg.net_params == NULL)
		die("Failed adding new network device");

	kvm->cfg.net_params[kvm->cfg.num_net_devices - 1] = p;

done:
	free(buf);
	return 0;
}

static int virtio_net__init_one(struct virtio_net_params *params)
{
	int i, r;
	struct net_dev *ndev;
	struct virtio_ops *ops;
	enum virtio_trans trans = VIRTIO_DEFAULT_TRANS(params->kvm);

	ndev = calloc(1, sizeof(struct net_dev));
	if (ndev == NULL)
		return -ENOMEM;

	list_add_tail(&ndev->list, &ndevs);

	ops = malloc(sizeof(*ops));
	if (ops == NULL)
		return -ENOMEM;

	ndev->kvm = params->kvm;
	ndev->params = params;

	mutex_init(&ndev->mutex);
	ndev->queue_pairs = max(1, min(VIRTIO_NET_NUM_QUEUES, params->mq));

	for (i = 0; i < 6; i++) {
		ndev->config.mac[i]		= params->guest_mac[i];
		ndev->info.guest_mac.addr[i]	= params->guest_mac[i];
		ndev->info.host_mac.addr[i]	= params->host_mac[i];
	}

	ndev->mode = params->mode;
	if (ndev->mode == NET_MODE_TAP) {
		ndev->ops = &tap_ops;
		if (!virtio_net__tap_create(ndev))
			die_perror("You have requested a TAP device, but creation of one has failed because");
	} else {
		ndev->info.host_ip		= ntohl(inet_addr(params->host_ip));
		ndev->info.guest_ip		= ntohl(inet_addr(params->guest_ip));
		ndev->info.guest_netmask	= ntohl(inet_addr("255.255.255.0"));
		ndev->info.buf_nr		= 20;
		ndev->ops = &uip_ops;
		uip_static_init(&ndev->info);
	}

	*ops = net_dev_virtio_ops;

	if (params->trans) {
		if (strcmp(params->trans, "mmio") == 0)
			trans = VIRTIO_MMIO;
		else if (strcmp(params->trans, "pci") == 0)
			trans = VIRTIO_PCI;
		else
			pr_warning("virtio-net: Unknown transport method: %s, "
				   "falling back to %s.", params->trans,
				   virtio_trans_name(trans));
	}

	r = virtio_init(params->kvm, ndev, &ndev->vdev, ops, trans,
			PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);
	if (r < 0) {
		free(ops);
		return r;
	}

	if (params->vhost)
		virtio_net__vhost_init(params->kvm, ndev);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-net", "CONFIG_VIRTIO_NET");

	return 0;
}

int virtio_net__init(struct kvm *kvm)
{
	int i, r;

	for (i = 0; i < kvm->cfg.num_net_devices; i++) {
		kvm->cfg.net_params[i].kvm = kvm;
		r = virtio_net__init_one(&kvm->cfg.net_params[i]);
		if (r < 0)
			goto cleanup;
	}

	if (kvm->cfg.num_net_devices == 0 && kvm->cfg.no_net == 0) {
		static struct virtio_net_params net_params;

		net_params = (struct virtio_net_params) {
			.guest_ip	= kvm->cfg.guest_ip,
			.host_ip	= kvm->cfg.host_ip,
			.kvm		= kvm,
			.script		= kvm->cfg.script,
			.mode		= NET_MODE_USER,
		};
		str_to_mac(kvm->cfg.guest_mac, net_params.guest_mac);
		str_to_mac(kvm->cfg.host_mac, net_params.host_mac);

		r = virtio_net__init_one(&net_params);
		if (r < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	virtio_net__exit(kvm);
	return r;
}
virtio_dev_init(virtio_net__init);

int virtio_net__exit(struct kvm *kvm)
{
	struct virtio_net_params *params;
	struct net_dev *ndev;
	struct list_head *ptr, *n;

	list_for_each_safe(ptr, n, &ndevs) {
		ndev = list_entry(ptr, struct net_dev, list);
		params = ndev->params;
		/* Clean up any tap device that was attached to a bridge. */
		if (ndev->mode == NET_MODE_TAP &&
		    strcmp(params->downscript, "none"))
			virtio_net_exec_script(params->downscript, ndev->tap_name);

		list_del(&ndev->list);
		free(ndev);
	}
	return 0;
}
virtio_dev_exit(virtio_net__exit);
1054