xref: /kvmtool/virtio/net.c (revision 9e56ec141326bfdd20ca82e8f92c493e3ecef006)
#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/irq.h"
#include "kvm/uip.h"
#include "kvm/guest_compat.h"

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/if_tun.h>
#include <linux/types.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/eventfd.h>

#define VIRTIO_NET_QUEUE_SIZE		256
#define VIRTIO_NET_NUM_QUEUES		8

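/*
 * Virtqueue layout: each queue pair uses two rings (even index = RX,
 * odd index = TX), plus one control queue at index queue_pairs * 2,
 * hence the VIRTIO_NET_NUM_QUEUES * 2 + 1 sized arrays below.
 */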
struct net_dev;

struct net_dev_operations {
	int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
	int (*tx)(struct iovec *iov, u16 in, struct net_dev *ndev);
};

struct net_dev {
	struct mutex			mutex;
	struct virtio_device		vdev;
	struct list_head		list;

	struct virt_queue		vqs[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	struct virtio_net_config	config;
	u32				features, rx_vqs, tx_vqs, queue_pairs;

	pthread_t			io_thread[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	struct mutex			io_lock[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	pthread_cond_t			io_cond[VIRTIO_NET_NUM_QUEUES * 2 + 1];

	int				vhost_fd;
	int				tap_fd;
	char				tap_name[IFNAMSIZ];

	int				mode;

	struct uip_info			info;
	struct net_dev_operations	*ops;
	struct kvm			*kvm;
};

static LIST_HEAD(ndevs);
static int compat_id = -1;

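/*
 * Per-RX-queue worker: sleeps on its condition variable until the guest
 * kicks the queue, then fills the available descriptor chains with
 * incoming frames via ndev->ops->rx() and signals the guest.
 */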
static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	struct net_dev *ndev = p;
	u16 out, in;
	u16 head;
	int len;
	u32 id;

	mutex_lock(&ndev->mutex);
	id = ndev->rx_vqs++ * 2;
	mutex_unlock(&ndev->mutex);

	kvm__set_thread_name("virtio-net-rx");

	kvm = ndev->kvm;
	vq = &ndev->vqs[id];

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len = ndev->ops->rx(iov, in, ndev);
			virt_queue__set_used_elem(vq, head, len);

			/* Interrupt the guest right away, otherwise latency is huge. */
			if (virtio_queue__should_signal(vq))
				ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
		}
	}

	pthread_exit(NULL);
	return NULL;

}

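/*
 * Per-TX-queue worker: mirrors the RX thread, but drains outgoing frames
 * from the guest with ndev->ops->tx() and signals the guest once the
 * whole batch has been consumed.
 */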
static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	struct net_dev *ndev = p;
	u16 out, in;
	u16 head;
	int len;
	u32 id;

	mutex_lock(&ndev->mutex);
	id = ndev->tx_vqs++ * 2 + 1;
	mutex_unlock(&ndev->mutex);

	kvm__set_thread_name("virtio-net-tx");

	kvm = ndev->kvm;
	vq = &ndev->vqs[id];

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len = ndev->ops->tx(iov, out, ndev);
			virt_queue__set_used_elem(vq, head, len);
		}

		if (virtio_queue__should_signal(vq))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
	}

	pthread_exit(NULL);

	return NULL;

}

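/*
 * Control-queue handling: the guest places a virtio_net_ctrl_hdr in the
 * first descriptor (currently only VIRTIO_NET_CTRL_MQ is acknowledged)
 * and expects a one-byte ack written into the last descriptor of the chain.
 */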
static virtio_net_ctrl_ack virtio_net_handle_mq(struct kvm *kvm, struct net_dev *ndev, struct virtio_net_ctrl_hdr *ctrl)
{
	/* Not much to do here */
	return VIRTIO_NET_OK;
}

static void *virtio_net_ctrl_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	u16 out, in, head;
	struct net_dev *ndev = p;
	struct kvm *kvm = ndev->kvm;
	u32 id = ndev->queue_pairs * 2;
	struct virt_queue *vq = &ndev->vqs[id];
	struct virtio_net_ctrl_hdr *ctrl;
	virtio_net_ctrl_ack *ack;

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			ctrl = iov[0].iov_base;
			ack = iov[out].iov_base;

			switch (ctrl->class) {
			case VIRTIO_NET_CTRL_MQ:
				*ack = virtio_net_handle_mq(kvm, ndev, ctrl);
				break;
			default:
				*ack = VIRTIO_NET_ERR;
				break;
			}
			virt_queue__set_used_elem(vq, head, iov[out].iov_len);
		}

		if (virtio_queue__should_signal(vq))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
	}

	pthread_exit(NULL);

	return NULL;
}

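/*
 * Queue notification from the guest: validate the index and wake up the
 * I/O thread that owns this virtqueue.
 */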
static void virtio_net_handle_callback(struct kvm *kvm, struct net_dev *ndev, int queue)
{
	if ((u32)queue >= (ndev->queue_pairs * 2 + 1)) {
		pr_warning("Unknown queue index %u", queue);
		return;
	}

	mutex_lock(&ndev->io_lock[queue]);
	pthread_cond_signal(&ndev->io_cond[queue]);
	mutex_unlock(&ndev->io_lock[queue]);
}

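/*
 * TAP backend setup: open /dev/net/tun (unless the user handed us an fd),
 * enable the vnet header and offloads, then either run the user-supplied
 * script or assign the host IP directly, and finally bring the link up.
 */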
static bool virtio_net__tap_init(const struct virtio_net_params *params,
					struct net_dev *ndev)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int pid, status, offload, hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;

	/* Did the user already give us a tap fd? */
	if (params->fd) {
		ndev->tap_fd = params->fd;
		return true;
	}

	ndev->tap_fd = open("/dev/net/tun", O_RDWR);
	if (ndev->tap_fd < 0) {
		pr_warning("Unable to open /dev/net/tun");
		goto fail;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(ndev->tap_fd, TUNSETIFF, &ifr) < 0) {
		pr_warning("Config tap device error. Are you root?");
		goto fail;
	}

	strncpy(ndev->tap_name, ifr.ifr_name, sizeof(ndev->tap_name));

	if (ioctl(ndev->tap_fd, TUNSETNOCSUM, 1) < 0) {
		pr_warning("Config tap device TUNSETNOCSUM error");
		goto fail;
	}

	hdr_len = sizeof(struct virtio_net_hdr);
	if (ioctl(ndev->tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0)
		pr_warning("Config tap device TUNSETVNETHDRSZ error");

	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
	if (ioctl(ndev->tap_fd, TUNSETOFFLOAD, offload) < 0) {
		pr_warning("Config tap device TUNSETOFFLOAD error");
		goto fail;
	}

	if (strcmp(params->script, "none")) {
		pid = fork();
		if (pid == 0) {
			execl(params->script, params->script, ndev->tap_name, NULL);
			_exit(1);
		} else {
			waitpid(pid, &status, 0);
			if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
				pr_warning("Failed to set up tap device with %s", params->script);
				goto fail;
			}
		}
	} else {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ifr.ifr_name));
		sin.sin_addr.s_addr = inet_addr(params->host_ip);
		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
		ifr.ifr_addr.sa_family = AF_INET;
		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
			pr_warning("Could not set ip address on tap device");
			goto fail;
		}
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ifr.ifr_name));
	ioctl(sock, SIOCGIFFLAGS, &ifr);
	ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
		pr_warning("Could not bring tap device up");

	close(sock);

	return true;

fail:
	if (sock >= 0)
		close(sock);
	if (ndev->tap_fd >= 0)
		close(ndev->tap_fd);

	return false;
}

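/* Backend dispatch: writev/readv on the tap fd, or the userspace uip stack. */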
static inline int tap_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return writev(ndev->tap_fd, iov, out);
}

static inline int tap_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return readv(ndev->tap_fd, iov, in);
}

static inline int uip_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return uip_tx(iov, out, &ndev->info);
}

static inline int uip_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return uip_rx(iov, in, &ndev->info);
}

static struct net_dev_operations tap_ops = {
	.rx	= tap_ops_rx,
	.tx	= tap_ops_tx,
};

static struct net_dev_operations uip_ops = {
	.rx	= uip_ops_rx,
	.tx	= uip_ops_tx,
};

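/*
 * The callbacks below implement the generic virtio_ops interface: config
 * space access, feature negotiation and virtqueue lifecycle.
 */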
static u8 *get_config(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return ((u8 *)(&ndev->config));
}

static u32 get_host_features(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return 1UL << VIRTIO_NET_F_MAC
		| 1UL << VIRTIO_NET_F_CSUM
		| 1UL << VIRTIO_NET_F_HOST_UFO
		| 1UL << VIRTIO_NET_F_HOST_TSO4
		| 1UL << VIRTIO_NET_F_HOST_TSO6
		| 1UL << VIRTIO_NET_F_GUEST_UFO
		| 1UL << VIRTIO_NET_F_GUEST_TSO4
		| 1UL << VIRTIO_NET_F_GUEST_TSO6
		| 1UL << VIRTIO_RING_F_EVENT_IDX
		| 1UL << VIRTIO_RING_F_INDIRECT_DESC
		| 1UL << VIRTIO_NET_F_CTRL_VQ
		| (ndev->queue_pairs > 1 ? 1UL << VIRTIO_NET_F_MQ : 0);
}

static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
{
	struct net_dev *ndev = dev;

	ndev->features = features;
}

static bool is_ctrl_vq(struct net_dev *ndev, u32 vq)
{
	return vq == (u32)(ndev->queue_pairs * 2);
}

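/*
 * Queue initialisation: map the guest-provided ring, then either spawn the
 * control/RX/TX worker threads (userspace data path) or program the ring
 * addresses into the vhost-net kernel backend when vhost is enabled.
 */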
static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
		   u32 pfn)
{
	struct vhost_vring_state state = { .index = vq };
	struct vhost_vring_addr addr;
	struct net_dev *ndev = dev;
	struct virt_queue *queue;
	void *p;
	int r;

	compat__remove_message(compat_id);

	queue		= &ndev->vqs[vq];
	queue->pfn	= pfn;
	p		= guest_flat_to_host(kvm, queue->pfn * page_size);

	vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, align);

	mutex_init(&ndev->io_lock[vq]);
	pthread_cond_init(&ndev->io_cond[vq], NULL);
	if (is_ctrl_vq(ndev, vq)) {
		pthread_create(&ndev->io_thread[vq], NULL, virtio_net_ctrl_thread, ndev);

		return 0;
	} else if (ndev->vhost_fd == 0) {
		if (vq & 1)
			pthread_create(&ndev->io_thread[vq], NULL, virtio_net_tx_thread, ndev);
		else
			pthread_create(&ndev->io_thread[vq], NULL, virtio_net_rx_thread, ndev);

		return 0;
	}

	state.num = queue->vring.num;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_NUM, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_NUM failed");
	state.num = 0;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_BASE, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_BASE failed");

	addr = (struct vhost_vring_addr) {
		.index = vq,
		.desc_user_addr = (u64)(unsigned long)queue->vring.desc,
		.avail_user_addr = (u64)(unsigned long)queue->vring.avail,
		.used_user_addr = (u64)(unsigned long)queue->vring.used,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_ADDR, &addr);
	if (r < 0)
		die_perror("VHOST_SET_VRING_ADDR failed");

	return 0;
}

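/*
 * With vhost enabled, route completions directly to the guest: create an
 * eventfd, register it as a KVM irqfd for this queue's GSI, hand it to
 * vhost-net as the call fd and attach the tap fd as the queue backend.
 */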
static void notify_vq_gsi(struct kvm *kvm, void *dev, u32 vq, u32 gsi)
{
	struct net_dev *ndev = dev;
	struct kvm_irqfd irq;
	struct vhost_vring_file file;
	int r;

	if (ndev->vhost_fd == 0)
		return;

	irq = (struct kvm_irqfd) {
		.gsi	= gsi,
		.fd	= eventfd(0, 0),
	};
	file = (struct vhost_vring_file) {
		.index	= vq,
		.fd	= irq.fd,
	};

	r = ioctl(kvm->vm_fd, KVM_IRQFD, &irq);
	if (r < 0)
		die_perror("KVM_IRQFD failed");

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_CALL, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_CALL failed");
	file.fd = ndev->tap_fd;
	r = ioctl(ndev->vhost_fd, VHOST_NET_SET_BACKEND, &file);
	if (r != 0)
		die("VHOST_NET_SET_BACKEND failed %d", errno);
}

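/*
 * With vhost enabled, pass the transport's ioeventfd to vhost-net as the
 * kick fd so guest notifications bypass userspace; the control queue is
 * still served by our own thread.
 */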
static void notify_vq_eventfd(struct kvm *kvm, void *dev, u32 vq, u32 efd)
{
	struct net_dev *ndev = dev;
	struct vhost_vring_file file = {
		.index	= vq,
		.fd	= efd,
	};
	int r;

	if (ndev->vhost_fd == 0 || is_ctrl_vq(ndev, vq))
		return;

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_KICK, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_KICK failed");
}

static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	virtio_net_handle_callback(kvm, ndev, vq);

	return 0;
}

static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	return ndev->vqs[vq].pfn;
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	/* FIXME: dynamic */
	return VIRTIO_NET_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static struct virtio_ops net_dev_virtio_ops = (struct virtio_ops) {
	.get_config		= get_config,
	.get_host_features	= get_host_features,
	.set_guest_features	= set_guest_features,
	.init_vq		= init_vq,
	.get_pfn_vq		= get_pfn_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.notify_vq		= notify_vq,
	.notify_vq_gsi		= notify_vq_gsi,
	.notify_vq_eventfd	= notify_vq_eventfd,
};

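/*
 * vhost-net setup: open /dev/vhost-net, claim ownership, negotiate
 * features and describe guest RAM as a single memory region so the kernel
 * side can translate ring addresses on its own.
 */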
static void virtio_net__vhost_init(struct kvm *kvm, struct net_dev *ndev)
{
	u64 features = 1UL << VIRTIO_RING_F_EVENT_IDX;
	struct vhost_memory *mem;
	int r;

	ndev->vhost_fd = open("/dev/vhost-net", O_RDWR);
	if (ndev->vhost_fd < 0)
		die_perror("Failed opening vhost-net device");

	mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
	if (mem == NULL)
		die("Failed allocating memory for vhost memory map");

	mem->nregions = 1;
	mem->regions[0] = (struct vhost_memory_region) {
		.guest_phys_addr	= 0,
		.memory_size		= kvm->ram_size,
		.userspace_addr		= (unsigned long)kvm->ram_start,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_OWNER);
	if (r != 0)
		die_perror("VHOST_SET_OWNER failed");

	r = ioctl(ndev->vhost_fd, VHOST_SET_FEATURES, &features);
	if (r != 0)
		die_perror("VHOST_SET_FEATURES failed");
	r = ioctl(ndev->vhost_fd, VHOST_SET_MEM_TABLE, mem);
	if (r != 0)
		die_perror("VHOST_SET_MEM_TABLE failed");

	ndev->vdev.use_vhost = true;

	free(mem);
}

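/*
 * Command-line handling: translate "--network opt=val,..." strings into a
 * struct virtio_net_params entry on kvm->cfg.net_params.
 */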
static inline void str_to_mac(const char *str, char *mac)
{
	sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		mac, mac+1, mac+2, mac+3, mac+4, mac+5);
}

static int set_net_param(struct kvm *kvm, struct virtio_net_params *p,
			const char *param, const char *val)
{
	if (strcmp(param, "guest_mac") == 0) {
		str_to_mac(val, p->guest_mac);
	} else if (strcmp(param, "mode") == 0) {
		if (!strncmp(val, "user", 4)) {
			int i;

			for (i = 0; i < kvm->cfg.num_net_devices; i++)
				if (kvm->cfg.net_params[i].mode == NET_MODE_USER)
					die("Only one usermode network device allowed at a time");
			p->mode = NET_MODE_USER;
		} else if (!strncmp(val, "tap", 3)) {
			p->mode = NET_MODE_TAP;
		} else if (!strncmp(val, "none", 4)) {
			kvm->cfg.no_net = 1;
			return -1;
		} else
			die("Unknown network mode %s, please use user, tap or none", kvm->cfg.network);
	} else if (strcmp(param, "script") == 0) {
		p->script = strdup(val);
	} else if (strcmp(param, "guest_ip") == 0) {
		p->guest_ip = strdup(val);
	} else if (strcmp(param, "host_ip") == 0) {
		p->host_ip = strdup(val);
	} else if (strcmp(param, "trans") == 0) {
		p->trans = strdup(val);
	} else if (strcmp(param, "vhost") == 0) {
		p->vhost = atoi(val);
	} else if (strcmp(param, "fd") == 0) {
		p->fd = atoi(val);
	} else if (strcmp(param, "mq") == 0) {
		p->mq = atoi(val);
	} else
		die("Unknown network parameter %s", param);

	return 0;
}

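/*
 * Option callback for --network: tokenise the argument into key/value
 * pairs, fill a virtio_net_params with defaults plus overrides and append
 * it to the configuration.
 */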
int netdev_parser(const struct option *opt, const char *arg, int unset)
{
	struct virtio_net_params p;
	char *buf = NULL, *cmd = NULL, *cur = NULL;
	bool on_cmd = true;
	struct kvm *kvm = opt->ptr;

	if (arg) {
		buf = strdup(arg);
		if (buf == NULL)
			die("Failed allocating new net buffer");
		cur = strtok(buf, ",=");
	}

	p = (struct virtio_net_params) {
		.guest_ip	= DEFAULT_GUEST_ADDR,
		.host_ip	= DEFAULT_HOST_ADDR,
		.script		= DEFAULT_SCRIPT,
		.mode		= NET_MODE_TAP,
	};

	str_to_mac(DEFAULT_GUEST_MAC, p.guest_mac);
	p.guest_mac[5] += kvm->cfg.num_net_devices;

	while (cur) {
		if (on_cmd) {
			cmd = cur;
		} else {
			if (set_net_param(kvm, &p, cmd, cur) < 0)
				goto done;
		}
		on_cmd = !on_cmd;

		cur = strtok(NULL, ",=");
	}

	kvm->cfg.num_net_devices++;

	kvm->cfg.net_params = realloc(kvm->cfg.net_params, kvm->cfg.num_net_devices * sizeof(*kvm->cfg.net_params));
	if (kvm->cfg.net_params == NULL)
		die("Failed adding new network device");

	kvm->cfg.net_params[kvm->cfg.num_net_devices - 1] = p;

done:
	free(buf);
	return 0;
}

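/*
 * Instantiate one virtio-net device: pick the tap or uip (user-mode)
 * backend, register the device on the PCI or MMIO transport, and switch
 * the data path to vhost-net when requested.
 */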
static int virtio_net__init_one(struct virtio_net_params *params)
{
	int i;
	struct net_dev *ndev;

	ndev = calloc(1, sizeof(struct net_dev));
	if (ndev == NULL)
		return -ENOMEM;

	list_add_tail(&ndev->list, &ndevs);

	ndev->kvm = params->kvm;

	mutex_init(&ndev->mutex);
	ndev->queue_pairs = max(1, min(VIRTIO_NET_NUM_QUEUES, params->mq));
	ndev->config.status = VIRTIO_NET_S_LINK_UP;
	if (ndev->queue_pairs > 1)
		ndev->config.max_virtqueue_pairs = ndev->queue_pairs;

	for (i = 0; i < 6; i++) {
		ndev->config.mac[i]		= params->guest_mac[i];
		ndev->info.guest_mac.addr[i]	= params->guest_mac[i];
		ndev->info.host_mac.addr[i]	= params->host_mac[i];
	}

	ndev->mode = params->mode;
	if (ndev->mode == NET_MODE_TAP) {
		if (!virtio_net__tap_init(params, ndev))
			die_perror("You have requested a TAP device, but creation of one has failed because");
		ndev->ops = &tap_ops;
	} else {
		ndev->info.host_ip		= ntohl(inet_addr(params->host_ip));
		ndev->info.guest_ip		= ntohl(inet_addr(params->guest_ip));
		ndev->info.guest_netmask	= ntohl(inet_addr("255.255.255.0"));
		ndev->info.buf_nr		= 20;
		ndev->info.vnet_hdr_len		= sizeof(struct virtio_net_hdr);
		uip_init(&ndev->info);
		ndev->ops = &uip_ops;
	}

	if (params->trans && strcmp(params->trans, "mmio") == 0)
		virtio_init(params->kvm, ndev, &ndev->vdev, &net_dev_virtio_ops,
			    VIRTIO_MMIO, PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);
	else
		virtio_init(params->kvm, ndev, &ndev->vdev, &net_dev_virtio_ops,
			    VIRTIO_PCI, PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);

	if (params->vhost)
		virtio_net__vhost_init(params->kvm, ndev);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-net", "CONFIG_VIRTIO_NET");

	return 0;
}

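/*
 * Device registration entry point: create a device for every --network
 * option, or fall back to a single user-mode device when nothing was
 * configured and networking was not explicitly disabled.
 */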
int virtio_net__init(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->cfg.num_net_devices; i++) {
		kvm->cfg.net_params[i].kvm = kvm;
		virtio_net__init_one(&kvm->cfg.net_params[i]);
	}

	if (kvm->cfg.num_net_devices == 0 && kvm->cfg.no_net == 0) {
		struct virtio_net_params net_params;

		net_params = (struct virtio_net_params) {
			.guest_ip	= kvm->cfg.guest_ip,
			.host_ip	= kvm->cfg.host_ip,
			.kvm		= kvm,
			.script		= kvm->cfg.script,
			.mode		= NET_MODE_USER,
		};
		str_to_mac(kvm->cfg.guest_mac, net_params.guest_mac);
		str_to_mac(kvm->cfg.host_mac, net_params.host_mac);

		virtio_net__init_one(&net_params);
	}

	return 0;
}
virtio_dev_init(virtio_net__init);

int virtio_net__exit(struct kvm *kvm)
{
	return 0;
}
virtio_dev_exit(virtio_net__exit);
733