xref: /kvmtool/virtio/net.c (revision 49a8afd1b9a4e503bdafb2bbc04549e03d514836)
1 #include "kvm/virtio-pci-dev.h"
2 #include "kvm/virtio-net.h"
3 #include "kvm/virtio.h"
4 #include "kvm/types.h"
5 #include "kvm/mutex.h"
6 #include "kvm/util.h"
7 #include "kvm/kvm.h"
8 #include "kvm/irq.h"
9 #include "kvm/uip.h"
10 #include "kvm/guest_compat.h"
11 
12 #include <linux/vhost.h>
13 #include <linux/virtio_net.h>
14 #include <linux/if_tun.h>
15 #include <linux/types.h>
16 
17 #include <arpa/inet.h>
18 #include <net/if.h>
19 
20 #include <unistd.h>
21 #include <fcntl.h>
22 
23 #include <sys/socket.h>
24 #include <sys/ioctl.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <sys/eventfd.h>
28 
/* Virtqueue geometry: each queue holds 256 descriptors; queue 0 is RX, queue 1 is TX. */
#define VIRTIO_NET_QUEUE_SIZE		256
#define VIRTIO_NET_NUM_QUEUES		2
#define VIRTIO_NET_RX_QUEUE		0
#define VIRTIO_NET_TX_QUEUE		1
33 
struct net_dev;

/* Global VM handle, defined elsewhere in kvmtool. */
extern struct kvm *kvm;
37 
/*
 * Backend packet operations. The tap backend forwards frames through a
 * tap file descriptor; the uip backend runs kvmtool's user-mode stack.
 * Both return the number of bytes transferred (readv/writev style).
 */
struct net_dev_operations {
	int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
	int (*tx)(struct iovec *iov, u16 in, struct net_dev *ndev);
};
42 
/* Per-device state for one virtio-net instance. */
struct net_dev {
	pthread_mutex_t			mutex;
	struct virtio_device		vdev;
	struct list_head		list;	/* node on the global 'ndevs' list */

	struct virt_queue		vqs[VIRTIO_NET_NUM_QUEUES];
	struct virtio_net_config	config;
	u32				features;	/* features acked by the guest */

	/* RX worker thread plus the lock/condvar used to wake it */
	pthread_t			io_rx_thread;
	pthread_mutex_t			io_rx_lock;
	pthread_cond_t			io_rx_cond;

	/* TX worker thread plus the lock/condvar used to wake it */
	pthread_t			io_tx_thread;
	pthread_mutex_t			io_tx_lock;
	pthread_cond_t			io_tx_cond;

	int				vhost_fd;	/* /dev/vhost-net fd; 0 when vhost is off */
	int				tap_fd;
	char				tap_name[IFNAMSIZ];

	int				mode;	/* NET_MODE_TAP or NET_MODE_USER */

	struct uip_info			info;	/* user-mode (uip) stack state */
	struct net_dev_operations	*ops;
	struct kvm			*kvm;
};
70 
/* All instantiated net devices; compat_id tracks the one-shot "driver missing" message. */
static LIST_HEAD(ndevs);
static int compat_id = -1;
73 
74 static void *virtio_net_rx_thread(void *p)
75 {
76 	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
77 	struct virt_queue *vq;
78 	struct kvm *kvm;
79 	struct net_dev *ndev = p;
80 	u16 out, in;
81 	u16 head;
82 	int len;
83 
84 	kvm = ndev->kvm;
85 	vq = &ndev->vqs[VIRTIO_NET_RX_QUEUE];
86 
87 	while (1) {
88 		mutex_lock(&ndev->io_rx_lock);
89 		if (!virt_queue__available(vq))
90 			pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
91 		mutex_unlock(&ndev->io_rx_lock);
92 
93 		while (virt_queue__available(vq)) {
94 			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
95 			len = ndev->ops->rx(iov, in, ndev);
96 			virt_queue__set_used_elem(vq, head, len);
97 
98 			/* We should interrupt guest right now, otherwise latency is huge. */
99 			if (virtio_queue__should_signal(&ndev->vqs[VIRTIO_NET_RX_QUEUE]))
100 				ndev->vdev.ops->signal_vq(kvm, &ndev->vdev,
101 							   VIRTIO_NET_RX_QUEUE);
102 		}
103 	}
104 
105 	pthread_exit(NULL);
106 	return NULL;
107 
108 }
109 
110 static void *virtio_net_tx_thread(void *p)
111 {
112 	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
113 	struct virt_queue *vq;
114 	struct kvm *kvm;
115 	struct net_dev *ndev = p;
116 	u16 out, in;
117 	u16 head;
118 	int len;
119 
120 	kvm = ndev->kvm;
121 	vq = &ndev->vqs[VIRTIO_NET_TX_QUEUE];
122 
123 	while (1) {
124 		mutex_lock(&ndev->io_tx_lock);
125 		if (!virt_queue__available(vq))
126 			pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock);
127 		mutex_unlock(&ndev->io_tx_lock);
128 
129 		while (virt_queue__available(vq)) {
130 			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
131 			len = ndev->ops->tx(iov, out, ndev);
132 			virt_queue__set_used_elem(vq, head, len);
133 		}
134 
135 		if (virtio_queue__should_signal(&ndev->vqs[VIRTIO_NET_TX_QUEUE]))
136 			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, VIRTIO_NET_TX_QUEUE);
137 	}
138 
139 	pthread_exit(NULL);
140 
141 	return NULL;
142 
143 }
144 
145 static void virtio_net_handle_callback(struct kvm *kvm, struct net_dev *ndev, int queue)
146 {
147 	switch (queue) {
148 	case VIRTIO_NET_TX_QUEUE:
149 		mutex_lock(&ndev->io_tx_lock);
150 		pthread_cond_signal(&ndev->io_tx_cond);
151 		mutex_unlock(&ndev->io_tx_lock);
152 		break;
153 	case VIRTIO_NET_RX_QUEUE:
154 		mutex_lock(&ndev->io_rx_lock);
155 		pthread_cond_signal(&ndev->io_rx_cond);
156 		mutex_unlock(&ndev->io_rx_lock);
157 		break;
158 	default:
159 		pr_warning("Unknown queue index %u", queue);
160 	}
161 }
162 
163 static bool virtio_net__tap_init(const struct virtio_net_params *params,
164 					struct net_dev *ndev)
165 {
166 	int sock = socket(AF_INET, SOCK_STREAM, 0);
167 	int pid, status, offload, hdr_len;
168 	struct sockaddr_in sin = {0};
169 	struct ifreq ifr;
170 
171 	/* Did the user already gave us the FD? */
172 	if (params->fd) {
173 		ndev->tap_fd = params->fd;
174 		return 1;
175 	}
176 
177 	ndev->tap_fd = open("/dev/net/tun", O_RDWR);
178 	if (ndev->tap_fd < 0) {
179 		pr_warning("Unable to open /dev/net/tun");
180 		goto fail;
181 	}
182 
183 	memset(&ifr, 0, sizeof(ifr));
184 	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
185 	if (ioctl(ndev->tap_fd, TUNSETIFF, &ifr) < 0) {
186 		pr_warning("Config tap device error. Are you root?");
187 		goto fail;
188 	}
189 
190 	strncpy(ndev->tap_name, ifr.ifr_name, sizeof(ndev->tap_name));
191 
192 	if (ioctl(ndev->tap_fd, TUNSETNOCSUM, 1) < 0) {
193 		pr_warning("Config tap device TUNSETNOCSUM error");
194 		goto fail;
195 	}
196 
197 	hdr_len = sizeof(struct virtio_net_hdr);
198 	if (ioctl(ndev->tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0)
199 		pr_warning("Config tap device TUNSETVNETHDRSZ error");
200 
201 	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
202 	if (ioctl(ndev->tap_fd, TUNSETOFFLOAD, offload) < 0) {
203 		pr_warning("Config tap device TUNSETOFFLOAD error");
204 		goto fail;
205 	}
206 
207 	if (strcmp(params->script, "none")) {
208 		pid = fork();
209 		if (pid == 0) {
210 			execl(params->script, params->script, ndev->tap_name, NULL);
211 			_exit(1);
212 		} else {
213 			waitpid(pid, &status, 0);
214 			if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
215 				pr_warning("Fail to setup tap by %s", params->script);
216 				goto fail;
217 			}
218 		}
219 	} else {
220 		memset(&ifr, 0, sizeof(ifr));
221 		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ndev->tap_name));
222 		sin.sin_addr.s_addr = inet_addr(params->host_ip);
223 		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
224 		ifr.ifr_addr.sa_family = AF_INET;
225 		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
226 			pr_warning("Could not set ip address on tap device");
227 			goto fail;
228 		}
229 	}
230 
231 	memset(&ifr, 0, sizeof(ifr));
232 	strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ndev->tap_name));
233 	ioctl(sock, SIOCGIFFLAGS, &ifr);
234 	ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
235 	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
236 		pr_warning("Could not bring tap device up");
237 
238 	close(sock);
239 
240 	return 1;
241 
242 fail:
243 	if (sock >= 0)
244 		close(sock);
245 	if (ndev->tap_fd >= 0)
246 		close(ndev->tap_fd);
247 
248 	return 0;
249 }
250 
251 static void virtio_net__io_thread_init(struct kvm *kvm, struct net_dev *ndev)
252 {
253 	pthread_mutex_init(&ndev->io_tx_lock, NULL);
254 	pthread_mutex_init(&ndev->io_rx_lock, NULL);
255 
256 	pthread_cond_init(&ndev->io_tx_cond, NULL);
257 	pthread_cond_init(&ndev->io_rx_cond, NULL);
258 
259 	pthread_create(&ndev->io_tx_thread, NULL, virtio_net_tx_thread, ndev);
260 	pthread_create(&ndev->io_rx_thread, NULL, virtio_net_rx_thread, ndev);
261 }
262 
/* Write an outgoing frame's iovec straight to the tap fd. */
static inline int tap_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return writev(ndev->tap_fd, iov, out);
}
267 
/* Read an incoming frame from the tap fd into the guest's iovec. */
static inline int tap_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return readv(ndev->tap_fd, iov, in);
}
272 
/* Hand an outgoing frame to the user-mode (uip) network stack. */
static inline int uip_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return uip_tx(iov, out, &ndev->info);
}
277 
/* Fetch an incoming frame from the user-mode (uip) network stack. */
static inline int uip_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return uip_rx(iov, in, &ndev->info);
}
282 
/* Tap backend: frames go straight to/from the tap file descriptor. */
static struct net_dev_operations tap_ops = {
	.rx	= tap_ops_rx,
	.tx	= tap_ops_tx,
};
287 
/* User-mode backend: frames are handled by kvmtool's built-in uip stack. */
static struct net_dev_operations uip_ops = {
	.rx	= uip_ops_rx,
	.tx	= uip_ops_tx,
};
292 
293 static u8 *get_config(struct kvm *kvm, void *dev)
294 {
295 	struct net_dev *ndev = dev;
296 
297 	return ((u8 *)(&ndev->config));
298 }
299 
300 static u32 get_host_features(struct kvm *kvm, void *dev)
301 {
302 	return 1UL << VIRTIO_NET_F_MAC
303 		| 1UL << VIRTIO_NET_F_CSUM
304 		| 1UL << VIRTIO_NET_F_HOST_UFO
305 		| 1UL << VIRTIO_NET_F_HOST_TSO4
306 		| 1UL << VIRTIO_NET_F_HOST_TSO6
307 		| 1UL << VIRTIO_NET_F_GUEST_UFO
308 		| 1UL << VIRTIO_NET_F_GUEST_TSO4
309 		| 1UL << VIRTIO_NET_F_GUEST_TSO6
310 		| 1UL << VIRTIO_RING_F_EVENT_IDX
311 		| 1UL << VIRTIO_RING_F_INDIRECT_DESC;
312 }
313 
314 static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
315 {
316 	struct net_dev *ndev = dev;
317 
318 	ndev->features = features;
319 }
320 
/*
 * Initialise virtqueue 'vq' at guest page frame 'pfn'. For the
 * in-userspace path this just maps and lays out the vring; when vhost
 * is active the ring geometry and addresses are also pushed into the
 * kernel. The VHOST_SET_VRING_NUM/BASE/ADDR sequence mirrors the order
 * the vhost API expects.
 */
static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
{
	struct vhost_vring_state state = { .index = vq };
	struct vhost_vring_addr addr;
	struct net_dev *ndev = dev;
	struct virt_queue *queue;
	void *p;
	int r;

	/* Guest driver is alive: drop the "no driver found" compat message. */
	compat__remove_message(compat_id);

	queue		= &ndev->vqs[vq];
	queue->pfn	= pfn;
	p		= guest_pfn_to_host(kvm, queue->pfn);

	/* FIXME: respect pci and mmio vring alignment */
	vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

	/* vhost_fd == 0 means vhost is disabled; userspace workers handle I/O. */
	if (ndev->vhost_fd == 0)
		return 0;

	state.num = queue->vring.num;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_NUM, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_NUM failed");
	/* Reset the ring's starting index before handing it to the kernel. */
	state.num = 0;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_BASE, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_BASE failed");

	/* Userspace addresses of the three vring regions, as vhost sees them. */
	addr = (struct vhost_vring_addr) {
		.index = vq,
		.desc_user_addr = (u64)(unsigned long)queue->vring.desc,
		.avail_user_addr = (u64)(unsigned long)queue->vring.avail,
		.used_user_addr = (u64)(unsigned long)queue->vring.used,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_ADDR, &addr);
	if (r < 0)
		die_perror("VHOST_SET_VRING_ADDR failed");

	return 0;
}
364 
365 static void notify_vq_gsi(struct kvm *kvm, void *dev, u32 vq, u32 gsi)
366 {
367 	struct net_dev *ndev = dev;
368 	struct kvm_irqfd irq;
369 	struct vhost_vring_file file;
370 	int r;
371 
372 	if (ndev->vhost_fd == 0)
373 		return;
374 
375 	irq = (struct kvm_irqfd) {
376 		.gsi	= gsi,
377 		.fd	= eventfd(0, 0),
378 	};
379 	file = (struct vhost_vring_file) {
380 		.index	= vq,
381 		.fd	= irq.fd,
382 	};
383 
384 	r = ioctl(kvm->vm_fd, KVM_IRQFD, &irq);
385 	if (r < 0)
386 		die_perror("KVM_IRQFD failed");
387 
388 	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_CALL, &file);
389 	if (r < 0)
390 		die_perror("VHOST_SET_VRING_CALL failed");
391 	file.fd = ndev->tap_fd;
392 	r = ioctl(ndev->vhost_fd, VHOST_NET_SET_BACKEND, &file);
393 	if (r != 0)
394 		die("VHOST_NET_SET_BACKEND failed %d", errno);
395 
396 }
397 
398 static void notify_vq_eventfd(struct kvm *kvm, void *dev, u32 vq, u32 efd)
399 {
400 	struct net_dev *ndev = dev;
401 	struct vhost_vring_file file = {
402 		.index	= vq,
403 		.fd	= efd,
404 	};
405 	int r;
406 
407 	if (ndev->vhost_fd == 0)
408 		return;
409 
410 	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_KICK, &file);
411 	if (r < 0)
412 		die_perror("VHOST_SET_VRING_KICK failed");
413 }
414 
415 static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
416 {
417 	struct net_dev *ndev = dev;
418 
419 	virtio_net_handle_callback(kvm, ndev, vq);
420 
421 	return 0;
422 }
423 
424 static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
425 {
426 	struct net_dev *ndev = dev;
427 
428 	return ndev->vqs[vq].pfn;
429 }
430 
/* All queues share one fixed size for now. */
static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	/* FIXME: dynamic */
	return VIRTIO_NET_QUEUE_SIZE;
}
436 
/* Queue resizing is not supported yet; echo the requested size back. */
static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}
442 
/* Transport callbacks handed to virtio_init() for every net device. */
static struct virtio_ops net_dev_virtio_ops = (struct virtio_ops) {
	.get_config		= get_config,
	.get_host_features	= get_host_features,
	.set_guest_features	= set_guest_features,
	.init_vq		= init_vq,
	.get_pfn_vq		= get_pfn_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.notify_vq		= notify_vq,
	.notify_vq_gsi		= notify_vq_gsi,
	.notify_vq_eventfd	= notify_vq_eventfd,
};
455 
456 static void virtio_net__vhost_init(struct kvm *kvm, struct net_dev *ndev)
457 {
458 	u64 features = 1UL << VIRTIO_RING_F_EVENT_IDX;
459 	struct vhost_memory *mem;
460 	int r;
461 
462 	ndev->vhost_fd = open("/dev/vhost-net", O_RDWR);
463 	if (ndev->vhost_fd < 0)
464 		die_perror("Failed openning vhost-net device");
465 
466 	mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
467 	if (mem == NULL)
468 		die("Failed allocating memory for vhost memory map");
469 
470 	mem->nregions = 1;
471 	mem->regions[0] = (struct vhost_memory_region) {
472 		.guest_phys_addr	= 0,
473 		.memory_size		= kvm->ram_size,
474 		.userspace_addr		= (unsigned long)kvm->ram_start,
475 	};
476 
477 	r = ioctl(ndev->vhost_fd, VHOST_SET_OWNER);
478 	if (r != 0)
479 		die_perror("VHOST_SET_OWNER failed");
480 
481 	r = ioctl(ndev->vhost_fd, VHOST_SET_FEATURES, &features);
482 	if (r != 0)
483 		die_perror("VHOST_SET_FEATURES failed");
484 	r = ioctl(ndev->vhost_fd, VHOST_SET_MEM_TABLE, mem);
485 	if (r != 0)
486 		die_perror("VHOST_SET_MEM_TABLE failed");
487 
488 	ndev->vdev.use_vhost = true;
489 
490 	free(mem);
491 }
492 
/* Parse a "aa:bb:cc:dd:ee:ff" string into a 6-byte MAC address. */
static inline void str_to_mac(const char *str, char *mac)
{
	sscanf(str,
	       "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
	       &mac[0], &mac[1], &mac[2],
	       &mac[3], &mac[4], &mac[5]);
}
/*
 * Apply one "key=value" pair from the --network option to 'p'.
 * Returns 0 on success; returns -1 only for "mode=none", which tells
 * the caller to stop and not register a device. Unknown keys die().
 */
static int set_net_param(struct virtio_net_params *p, const char *param,
				const char *val)
{
	if (strcmp(param, "guest_mac") == 0) {
		str_to_mac(val, p->guest_mac);
	} else if (strcmp(param, "mode") == 0) {
		if (!strncmp(val, "user", 4)) {
			int i;

			/* The uip stack has global state, so allow only one user-mode device. */
			for (i = 0; i < kvm->cfg.num_net_devices; i++)
				if (kvm->cfg.net_params[i].mode == NET_MODE_USER)
					die("Only one usermode network device allowed at a time");
			p->mode = NET_MODE_USER;
		} else if (!strncmp(val, "tap", 3)) {
			p->mode = NET_MODE_TAP;
		} else if (!strncmp(val, "none", 4)) {
			kvm->cfg.no_net = 1;
			return -1;
		} else
			die("Unknown network mode %s, please use user, tap or none", kvm->cfg.network);
	} else if (strcmp(param, "script") == 0) {
		p->script = strdup(val);
	} else if (strcmp(param, "guest_ip") == 0) {
		p->guest_ip = strdup(val);
	} else if (strcmp(param, "host_ip") == 0) {
		p->host_ip = strdup(val);
	} else if (strcmp(param, "trans") == 0) {
		p->trans = strdup(val);
	} else if (strcmp(param, "vhost") == 0) {
		p->vhost = atoi(val);
	} else if (strcmp(param, "fd") == 0) {
		p->fd = atoi(val);
	} else
		die("Unknown network parameter %s", param);

	return 0;
}
535 
/*
 * Option-parser callback for --network: split the argument into
 * comma-separated "key=value" pairs, fill a virtio_net_params starting
 * from defaults, and append it to kvm->cfg.net_params.
 */
int netdev_parser(const struct option *opt, const char *arg, int unset)
{
	struct virtio_net_params p;
	char *buf = NULL, *cmd = NULL, *cur = NULL;
	bool on_cmd = true;	/* tokens alternate key / value */
	struct kvm *kvm = opt->ptr;

	if (arg) {
		buf = strdup(arg);
		if (buf == NULL)
			die("Failed allocating new net buffer");
		/* NOTE(review): strtok() is not reentrant - OK while option parsing is single-threaded. */
		cur = strtok(buf, ",=");
	}

	/* Defaults, overridden below by whatever the user supplied. */
	p = (struct virtio_net_params) {
		.guest_ip	= DEFAULT_GUEST_ADDR,
		.host_ip	= DEFAULT_HOST_ADDR,
		.script		= DEFAULT_SCRIPT,
		.mode		= NET_MODE_TAP,
	};

	str_to_mac(DEFAULT_GUEST_MAC, p.guest_mac);
	/* Give every device a distinct default MAC by bumping the last byte. */
	p.guest_mac[5] += kvm->cfg.num_net_devices;

	while (cur) {
		if (on_cmd) {
			cmd = cur;
		} else {
			/* set_net_param() < 0 means "mode=none": abandon this device. */
			if (set_net_param(&p, cmd, cur) < 0)
				goto done;
		}
		on_cmd = !on_cmd;

		cur = strtok(NULL, ",=");
	};

	kvm->cfg.num_net_devices++;

	kvm->cfg.net_params = realloc(kvm->cfg.net_params, kvm->cfg.num_net_devices * sizeof(*kvm->cfg.net_params));
	if (kvm->cfg.net_params == NULL)
		die("Failed adding new network device");

	kvm->cfg.net_params[kvm->cfg.num_net_devices - 1] = p;

done:
	free(buf);
	return 0;
}
584 
585 static int virtio_net__init_one(struct virtio_net_params *params)
586 {
587 	int i;
588 	struct net_dev *ndev;
589 
590 	ndev = calloc(1, sizeof(struct net_dev));
591 	if (ndev == NULL)
592 		return -ENOMEM;
593 
594 	list_add_tail(&ndev->list, &ndevs);
595 
596 	ndev->kvm = params->kvm;
597 
598 	mutex_init(&ndev->mutex);
599 	ndev->config.status = VIRTIO_NET_S_LINK_UP;
600 
601 	for (i = 0 ; i < 6 ; i++) {
602 		ndev->config.mac[i]		= params->guest_mac[i];
603 		ndev->info.guest_mac.addr[i]	= params->guest_mac[i];
604 		ndev->info.host_mac.addr[i]	= params->host_mac[i];
605 	}
606 
607 	ndev->mode = params->mode;
608 	if (ndev->mode == NET_MODE_TAP) {
609 		if (!virtio_net__tap_init(params, ndev))
610 			die_perror("You have requested a TAP device, but creation of one has failed because");
611 		ndev->ops = &tap_ops;
612 	} else {
613 		ndev->info.host_ip		= ntohl(inet_addr(params->host_ip));
614 		ndev->info.guest_ip		= ntohl(inet_addr(params->guest_ip));
615 		ndev->info.guest_netmask	= ntohl(inet_addr("255.255.255.0"));
616 		ndev->info.buf_nr		= 20,
617 		uip_init(&ndev->info);
618 		ndev->ops = &uip_ops;
619 	}
620 
621 	if (params->trans && strcmp(params->trans, "mmio") == 0)
622 		virtio_init(kvm, ndev, &ndev->vdev, &net_dev_virtio_ops,
623 			    VIRTIO_MMIO, PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);
624 	else
625 		virtio_init(kvm, ndev, &ndev->vdev, &net_dev_virtio_ops,
626 			    VIRTIO_PCI, PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);
627 
628 	if (params->vhost)
629 		virtio_net__vhost_init(params->kvm, ndev);
630 	else
631 		virtio_net__io_thread_init(params->kvm, ndev);
632 
633 	if (compat_id == -1)
634 		compat_id = virtio_compat_add_message("virtio-net", "CONFIG_VIRTIO_NET");
635 
636 	return 0;
637 }
638 
639 int virtio_net__init(struct kvm *kvm)
640 {
641 	int i;
642 
643 	for (i = 0; i < kvm->cfg.num_net_devices; i++) {
644 		kvm->cfg.net_params[i].kvm = kvm;
645 		virtio_net__init_one(&kvm->cfg.net_params[i]);
646 	}
647 
648 	if (kvm->cfg.num_net_devices == 0 && kvm->cfg.no_net == 0) {
649 		struct virtio_net_params net_params;
650 
651 		net_params = (struct virtio_net_params) {
652 			.guest_ip	= kvm->cfg.guest_ip,
653 			.host_ip	= kvm->cfg.host_ip,
654 			.kvm		= kvm,
655 			.script		= kvm->cfg.script,
656 			.mode		= NET_MODE_USER,
657 		};
658 		str_to_mac(kvm->cfg.guest_mac, net_params.guest_mac);
659 		str_to_mac(kvm->cfg.host_mac, net_params.host_mac);
660 
661 		virtio_net__init_one(&net_params);
662 	}
663 
664 	return 0;
665 }
666 virtio_dev_init(virtio_net__init);
667 
/* Teardown hook registered with virtio_dev_exit(); nothing to clean up yet. */
int virtio_net__exit(struct kvm *kvm)
{
	return 0;
}
virtio_dev_exit(virtio_net__exit);
673