xref: /kvmtool/virtio/net.c (revision 8626798b5d3588f6abd8124c2bff1843324484ab)
#include "kvm/virtio-net.h"
#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/irq.h"
#include "kvm/ioeventfd.h"

#include <linux/virtio_net.h>
#include <linux/if_tun.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <assert.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>

#define VIRTIO_NET_QUEUE_SIZE		128
#define VIRTIO_NET_NUM_QUEUES		2
#define VIRTIO_NET_RX_QUEUE		0
#define VIRTIO_NET_TX_QUEUE		1

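/*
 * PCI identity of the emulated device: Red Hat/Qumranet vendor ID with the
 * virtio-net device ID and class code 0x020000 (Ethernet controller).
 */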
static struct pci_device_header pci_header = {
	.vendor_id			= PCI_VENDOR_ID_REDHAT_QUMRANET,
	.device_id			= PCI_DEVICE_ID_VIRTIO_NET,
	.header_type			= PCI_HEADER_TYPE_NORMAL,
	.revision_id			= 0,
	.class				= 0x020000,
	.subsys_vendor_id		= PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
	.subsys_id			= VIRTIO_ID_NET,
};

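/*
 * Per-device state: the two virtqueues, the virtio-net config space exposed
 * to the guest, negotiated feature bits, the RX/TX I/O threads with their
 * wakeup locks and condition variables, and the host-side tap file descriptor.
 */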
struct net_dev {
	pthread_mutex_t			mutex;

	struct virt_queue		vqs[VIRTIO_NET_NUM_QUEUES];
	struct virtio_net_config	config;
	u32				host_features;
	u32				guest_features;
	u16				config_vector;
	u8				status;
	u8				isr;
	u16				queue_selector;
	u16				base_addr;

	pthread_t			io_rx_thread;
	pthread_mutex_t			io_rx_lock;
	pthread_cond_t			io_rx_cond;

	pthread_t			io_tx_thread;
	pthread_mutex_t			io_tx_lock;
	pthread_cond_t			io_tx_cond;

	int				tap_fd;
	char				tap_name[IFNAMSIZ];

	int				mode;
};

static struct net_dev ndev = {
	.mutex	= PTHREAD_MUTEX_INITIALIZER,

	.config = {
		.mac			= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.status			= VIRTIO_NET_S_LINK_UP,
	},
	.host_features			= 1UL << VIRTIO_NET_F_MAC
					| 1UL << VIRTIO_NET_F_CSUM
					| 1UL << VIRTIO_NET_F_HOST_UFO
					| 1UL << VIRTIO_NET_F_HOST_TSO4
					| 1UL << VIRTIO_NET_F_HOST_TSO6
					| 1UL << VIRTIO_NET_F_GUEST_UFO
					| 1UL << VIRTIO_NET_F_GUEST_TSO4
					| 1UL << VIRTIO_NET_F_GUEST_TSO6,
};

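/*
 * RX worker: sleeps until the guest posts receive buffers, then copies each
 * frame from the tap fd into the guest buffers and raises an interrupt per
 * frame to keep receive latency low.
 */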
static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_RX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_rx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_rx_cond, &ndev.io_rx_lock);
		mutex_unlock(&ndev.io_rx_lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len  = readv(ndev.tap_fd, iov, in);
			virt_queue__set_used_elem(vq, head, len);

			/* We should interrupt the guest right away, otherwise latency is huge. */
			virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
		}
	}

	pthread_exit(NULL);

	return NULL;
}

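/*
 * TX worker: sleeps until the guest kicks the TX queue, then drains all
 * pending frames to the tap fd and raises a single interrupt for the batch.
 */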
static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_TX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_tx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_tx_cond, &ndev.io_tx_lock);
		mutex_unlock(&ndev.io_tx_lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len  = writev(ndev.tap_fd, iov, out);
			virt_queue__set_used_elem(vq, head, len);
		}

		virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
	}

	pthread_exit(NULL);

	return NULL;
}

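/*
 * Reads from the device-specific config space (struct virtio_net_config):
 * offsets past the common virtio-pci registers index into ndev.config, one
 * byte at a time.
 */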
static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offset, int size, u32 count)
{
	u8 *config_space = (u8 *)&ndev.config;

	if (size != 1 || count != 1)
		return false;

	if ((offset - VIRTIO_MSI_CONFIG_VECTOR) >= sizeof(struct virtio_net_config))
		pr_error("config offset is too big: %lu", offset - VIRTIO_MSI_CONFIG_VECTOR);

	ioport__write8(data, config_space[offset - VIRTIO_MSI_CONFIG_VECTOR]);

	return true;
}

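/*
 * Handler for guest port reads: returns feature bits, queue PFN/size, device
 * status and the ISR (reading the ISR also deasserts the interrupt line),
 * and falls through to the device-specific config space for other offsets.
 */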
static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long	offset	= port - ndev.base_addr;
	bool		ret	= true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		ioport__write32(data, ndev.host_features);
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		ret = false;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		ioport__write32(data, ndev.vqs[ndev.queue_selector].pfn);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		ioport__write16(data, VIRTIO_NET_QUEUE_SIZE);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
	case VIRTIO_PCI_QUEUE_NOTIFY:
		ret = false;
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, ndev.status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, ndev.isr);
		kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
		ndev.isr = VIRTIO_IRQ_LOW;
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ioport__write16(data, ndev.config_vector);
		break;
	default:
		ret = virtio_net_pci_io_device_specific_in(data, offset, size, count);
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

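/* Wake the matching I/O thread when the guest kicks a queue. */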
static void virtio_net_handle_callback(struct kvm *kvm, u16 queue_index)
{
	switch (queue_index) {
	case VIRTIO_NET_TX_QUEUE: {
		mutex_lock(&ndev.io_tx_lock);
		pthread_cond_signal(&ndev.io_tx_cond);
		mutex_unlock(&ndev.io_tx_lock);
		break;
	}
	case VIRTIO_NET_RX_QUEUE: {
		mutex_lock(&ndev.io_rx_lock);
		pthread_cond_signal(&ndev.io_rx_cond);
		mutex_unlock(&ndev.io_rx_lock);
		break;
	}
	default:
		pr_warning("Unknown queue index %u", queue_index);
	}
}

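/*
 * Handler for guest port writes: feature negotiation, placing a virtqueue
 * (the guest writes the ring's page frame number), queue selection, queue
 * notification, and device status updates.
 */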
static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long	offset		= port - ndev.base_addr;
	bool		ret		= true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		ndev.guest_features	= ioport__read32(data);
		break;
	case VIRTIO_PCI_QUEUE_PFN: {
		struct virt_queue *queue;
		void *p;

		assert(ndev.queue_selector < VIRTIO_NET_NUM_QUEUES);

		queue			= &ndev.vqs[ndev.queue_selector];
		queue->pfn		= ioport__read32(data);
		p			= guest_pfn_to_host(kvm, queue->pfn);

		vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

		break;
	}
	case VIRTIO_PCI_QUEUE_SEL:
		ndev.queue_selector	= ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY: {
		u16 queue_index;

		queue_index		= ioport__read16(data);
		virtio_net_handle_callback(kvm, queue_index);
		break;
	}
	case VIRTIO_PCI_STATUS:
		ndev.status		= ioport__read8(data);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ndev.config_vector	= VIRTIO_MSI_NO_VECTOR;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		break;
	default:
		ret			= false;
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

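/* ioeventfd path: the queue index was stashed in the event's param pointer. */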
static void ioevent_callback(struct kvm *kvm, void *param)
{
	virtio_net_handle_callback(kvm, (u64)(long)param);
}

static struct ioport_operations virtio_net_io_ops = {
	.io_in	= virtio_net_pci_io_in,
	.io_out	= virtio_net_pci_io_out,
};

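/*
 * Creates and configures the host tap device: virtio-net header support and
 * checksum/TSO/UFO offloads, then either runs a user-supplied setup script or
 * assigns the host IP directly, before bringing the interface up.
 */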
static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int i, pid, status, offload, hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;

	for (i = 0 ; i < 6 ; i++)
		ndev.config.mac[i] = params->guest_mac[i];

	ndev.tap_fd = open("/dev/net/tun", O_RDWR);
	if (ndev.tap_fd < 0) {
		pr_warning("Unable to open /dev/net/tun");
		goto fail;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(ndev.tap_fd, TUNSETIFF, &ifr) < 0) {
		pr_warning("Failed to configure tap device. Are you root?");
		goto fail;
	}

	strncpy(ndev.tap_name, ifr.ifr_name, sizeof(ndev.tap_name));

	if (ioctl(ndev.tap_fd, TUNSETNOCSUM, 1) < 0) {
		pr_warning("Config tap device TUNSETNOCSUM error");
		goto fail;
	}

	hdr_len = sizeof(struct virtio_net_hdr);
	if (ioctl(ndev.tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0) {
		pr_warning("Config tap device TUNSETVNETHDRSZ error");
		goto fail;
	}

	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
	if (ioctl(ndev.tap_fd, TUNSETOFFLOAD, offload) < 0) {
		pr_warning("Config tap device TUNSETOFFLOAD error");
		goto fail;
	}

	if (strcmp(params->script, "none")) {
		pid = fork();
		if (pid == 0) {
			execl(params->script, params->script, ndev.tap_name, NULL);
			_exit(1);
		} else {
			waitpid(pid, &status, 0);
			if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
				pr_warning("Failed to set up tap device with %s", params->script);
				goto fail;
			}
		}
	} else {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name));
		sin.sin_addr.s_addr = inet_addr(params->host_ip);
		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
		ifr.ifr_addr.sa_family = AF_INET;
		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
			pr_warning("Could not set ip address on tap device");
			goto fail;
		}
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name));
	ioctl(sock, SIOCGIFFLAGS, &ifr);
	ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
		pr_warning("Could not bring tap device up");

	close(sock);

	return 1;

fail:
	if (sock >= 0)
		close(sock);
	if (ndev.tap_fd >= 0)
		close(ndev.tap_fd);

	return 0;
}

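/* Set up the RX/TX wakeup primitives and start both worker threads. */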
static void virtio_net__io_thread_init(struct kvm *kvm)
{
	pthread_mutex_init(&ndev.io_rx_lock, NULL);
	pthread_cond_init(&ndev.io_rx_cond, NULL);

	pthread_mutex_init(&ndev.io_tx_lock, NULL);
	pthread_cond_init(&ndev.io_tx_cond, NULL);

	pthread_create(&ndev.io_rx_thread, NULL, virtio_net_rx_thread, (void *)kvm);
	pthread_create(&ndev.io_tx_thread, NULL, virtio_net_tx_thread, (void *)kvm);
}

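/*
 * Device bring-up: initialize the tap backend, allocate an IRQ and an I/O
 * port region, register the PCI device, start the I/O threads, and attach
 * an ioeventfd per queue so that guest queue notifications are delivered via
 * eventfd instead of a trapped port write handled in the vCPU thread.
 */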
void virtio_net__init(const struct virtio_net_parameters *params)
{
	if (virtio_net__tap_init(params)) {
		u8 dev, line, pin;
		u16 net_base_addr;
		u64 i;
		struct ioevent ioevent;

		if (irq__register_device(VIRTIO_ID_NET, &dev, &pin, &line) < 0)
			return;

		pci_header.irq_pin	= pin;
		pci_header.irq_line	= line;
		net_base_addr		= ioport__register(IOPORT_EMPTY, &virtio_net_io_ops, IOPORT_SIZE, NULL);
		pci_header.bar[0]	= net_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
		ndev.base_addr		= net_base_addr;

		pci__register(&pci_header, dev);

		virtio_net__io_thread_init(params->kvm);

		for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
			ioevent = (struct ioevent) {
				.io_addr		= net_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
				.io_len			= sizeof(u16),
				.fn			= ioevent_callback,
				.datamatch		= i,
				.fn_ptr			= (void *)(long)i,
				.fn_kvm			= params->kvm,
				.fd			= eventfd(0, 0),
			};

			ioeventfd__add_event(&ioevent);
		}
	}
}