xref: /kvmtool/virtio/net.c (revision bb1a32f12288de2afe07aec7af4a6e1cb99bf1e1)
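/*
 * Virtio-net device emulation for kvmtool: a legacy virtio-pci network device
 * backed by a host tap interface. RX and TX each get a dedicated host thread,
 * and guest queue notifications are delivered through ioeventfds.
 */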
#include "kvm/virtio-net.h"
#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/irq.h"
#include "kvm/ioeventfd.h"

#include <linux/virtio_net.h>
#include <linux/if_tun.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <assert.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>

#define VIRTIO_NET_QUEUE_SIZE		128
#define VIRTIO_NET_NUM_QUEUES		2
#define VIRTIO_NET_RX_QUEUE		0
#define VIRTIO_NET_TX_QUEUE		1

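/*
 * PCI identity of the emulated device: Red Hat/Qumranet vendor ID with the
 * virtio-net device ID, Ethernet controller class code (0x020000), and the
 * virtio-net subsystem ID that the guest's virtio driver matches on.
 */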
static struct pci_device_header pci_header = {
	.vendor_id			= PCI_VENDOR_ID_REDHAT_QUMRANET,
	.device_id			= PCI_DEVICE_ID_VIRTIO_NET,
	.header_type			= PCI_HEADER_TYPE_NORMAL,
	.revision_id			= 0,
	.class				= 0x020000,
	.subsys_vendor_id		= PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
	.subsys_id			= VIRTIO_ID_NET,
};

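/*
 * Per-device state: the RX/TX virtqueue pair, the virtio-net config space
 * exposed to the guest, mirrored legacy virtio-pci registers (features,
 * status, ISR, queue selector, config vector), the PIO base address of the
 * register window, the backing tap file descriptor, and the locks/condvars
 * used to hand work over to the RX and TX threads.
 */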
struct net_device {
	pthread_mutex_t			mutex;

	struct virt_queue		vqs[VIRTIO_NET_NUM_QUEUES];
	struct virtio_net_config	config;
	u32				host_features;
	u32				guest_features;
	u16				config_vector;
	u8				status;
	u8				isr;
	u16				queue_selector;
	u16				base_addr;

	pthread_t			io_rx_thread;
	pthread_mutex_t			io_rx_lock;
	pthread_cond_t			io_rx_cond;

	pthread_t			io_tx_thread;
	pthread_mutex_t			io_tx_lock;
	pthread_cond_t			io_tx_cond;

	int				tap_fd;
	char				tap_name[IFNAMSIZ];

	int				mode;

};

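/*
 * Device defaults: a placeholder MAC (overwritten from params->guest_mac in
 * virtio_net__tap_init()), link reported as up, and the feature bits offered
 * to the guest: MAC, checksum offload, and TSO/UFO in both the host and
 * guest directions.
 */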
static struct net_device ndev = {
	.mutex				= PTHREAD_MUTEX_INITIALIZER,

	.config = {
		.mac			= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.status			= VIRTIO_NET_S_LINK_UP,
	},
	.host_features			= 1UL << VIRTIO_NET_F_MAC
					| 1UL << VIRTIO_NET_F_CSUM
					| 1UL << VIRTIO_NET_F_HOST_UFO
					| 1UL << VIRTIO_NET_F_HOST_TSO4
					| 1UL << VIRTIO_NET_F_HOST_TSO6
					| 1UL << VIRTIO_NET_F_GUEST_UFO
					| 1UL << VIRTIO_NET_F_GUEST_TSO4
					| 1UL << VIRTIO_NET_F_GUEST_TSO6,
};

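/*
 * RX path: sleep until the guest has posted receive buffers, then pull frames
 * off the tap with readv() into each descriptor chain ('in' counts the
 * guest-writable descriptors) and raise the interrupt per frame to keep
 * receive latency low.
 */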
static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_RX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_rx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_rx_cond, &ndev.io_rx_lock);
		mutex_unlock(&ndev.io_rx_lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len  = readv(ndev.tap_fd, iov, in);
			virt_queue__set_used_elem(vq, head, len);

			/* We should interrupt guest right now, otherwise latency is huge. */
			virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
		}

	}

	pthread_exit(NULL);
	return NULL;

}

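/*
 * TX path: sleep until the guest notifies, then drain the avail ring, writing
 * each descriptor chain ('out' counts the guest-readable descriptors) to the
 * tap with writev(), and signal the guest once per batch.
 */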
static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_TX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_tx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_tx_cond, &ndev.io_tx_lock);
		mutex_unlock(&ndev.io_tx_lock);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len  = writev(ndev.tap_fd, iov, out);
			virt_queue__set_used_elem(vq, head, len);
		}

		virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);

	}

	pthread_exit(NULL);

	return NULL;

}

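/*
 * Byte-wise reads of the device-specific configuration space (struct
 * virtio_net_config: MAC address and link status). With MSI-X disabled the
 * device config starts right after the common header, at the same offset as
 * VIRTIO_MSI_CONFIG_VECTOR, which is why that constant is subtracted here.
 */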
static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offset, int size, u32 count)
{
	u8 *config_space = (u8 *)&ndev.config;

	if (size != 1 || count != 1)
		return false;

	if ((offset - VIRTIO_MSI_CONFIG_VECTOR) > sizeof(struct virtio_net_config))
		pr_error("config offset is too big: %li", offset - VIRTIO_MSI_CONFIG_VECTOR);

	ioport__write8(data, config_space[offset - VIRTIO_MSI_CONFIG_VECTOR]);

	return true;
}

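/*
 * Guest reads from the device's PIO window; 'offset' is relative to the
 * registered base address and follows the legacy virtio-pci register layout.
 * Reading ISR de-asserts the interrupt line and clears the latched ISR value
 * (read-to-clear).
 */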
static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long	offset	= port - ndev.base_addr;
	bool		ret	= true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		ioport__write32(data, ndev.host_features);
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		ret = false;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		ioport__write32(data, ndev.vqs[ndev.queue_selector].pfn);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		ioport__write16(data, VIRTIO_NET_QUEUE_SIZE);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
	case VIRTIO_PCI_QUEUE_NOTIFY:
		ret = false;
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, ndev.status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, ndev.isr);
		kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
		ndev.isr = VIRTIO_IRQ_LOW;
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ioport__write16(data, ndev.config_vector);
		break;
	default:
		ret = virtio_net_pci_io_device_specific_in(data, offset, size, count);
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

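/*
 * Queue notification: instead of processing the ring in the caller's context,
 * wake up the I/O thread that owns the notified queue.
 */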
static void virtio_net_handle_callback(struct kvm *kvm, u16 queue_index)
{
	switch (queue_index) {
	case VIRTIO_NET_TX_QUEUE: {
		mutex_lock(&ndev.io_tx_lock);
		pthread_cond_signal(&ndev.io_tx_cond);
		mutex_unlock(&ndev.io_tx_lock);
		break;
	}
	case VIRTIO_NET_RX_QUEUE: {
		mutex_lock(&ndev.io_rx_lock);
		pthread_cond_signal(&ndev.io_rx_cond);
		mutex_unlock(&ndev.io_rx_lock);
		break;
	}
	default:
		pr_warning("Unknown queue index %u", queue_index);
	}
}

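/*
 * Guest writes to the PIO window: feature negotiation, queue placement
 * (QUEUE_PFN supplies the guest page frame of the vring, which is mapped and
 * laid out with vring_init()), queue selection, notifications, device status
 * and the MSI config vector.
 */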
static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long	offset		= port - ndev.base_addr;
	bool		ret		= true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		ndev.guest_features	= ioport__read32(data);
		break;
	case VIRTIO_PCI_QUEUE_PFN: {
		struct virt_queue *queue;
		void *p;

		assert(ndev.queue_selector < VIRTIO_NET_NUM_QUEUES);

		queue			= &ndev.vqs[ndev.queue_selector];
		queue->pfn		= ioport__read32(data);
		p			= guest_pfn_to_host(kvm, queue->pfn);

		vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

		break;
	}
	case VIRTIO_PCI_QUEUE_SEL:
		ndev.queue_selector	= ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY: {
		u16 queue_index;

		queue_index		= ioport__read16(data);
		virtio_net_handle_callback(kvm, queue_index);
		break;
	}
	case VIRTIO_PCI_STATUS:
		ndev.status		= ioport__read8(data);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ndev.config_vector	= VIRTIO_MSI_NO_VECTOR;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		break;
	default:
		ret			= false;
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

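/*
 * ioeventfd path: KVM matches the guest's write to VIRTIO_PCI_QUEUE_NOTIFY in
 * the kernel and signals the per-queue eventfd registered in
 * virtio_net__init(), so notifications arrive here (with the queue index as
 * the parameter) instead of through the PIO handler above.
 */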
static void ioevent_callback(struct kvm *kvm, void *param)
{
	virtio_net_handle_callback(kvm, (u64)(long)param);
}

static struct ioport_operations virtio_net_io_ops = {
	.io_in	= virtio_net_pci_io_in,
	.io_out	= virtio_net_pci_io_out,
};

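/*
 * Tap backend setup: copy the requested guest MAC into the config space,
 * create a tap interface with IFF_VNET_HDR, size its vnet header to
 * struct virtio_net_hdr and enable checksum/TSO/UFO offloads to match the
 * advertised features, then either run the user-supplied setup script with
 * the tap name as argument or assign the host-side IP directly, and finally
 * bring the interface up.
 */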
static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int i, pid, status, offload, hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;

	for (i = 0 ; i < 6 ; i++)
		ndev.config.mac[i] = params->guest_mac[i];

	ndev.tap_fd = open("/dev/net/tun", O_RDWR);
	if (ndev.tap_fd < 0) {
		pr_warning("Unable to open /dev/net/tun");
		goto fail;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(ndev.tap_fd, TUNSETIFF, &ifr) < 0) {
		pr_warning("Config tap device error. Are you root?");
		goto fail;
	}

	strncpy(ndev.tap_name, ifr.ifr_name, sizeof(ndev.tap_name));

	if (ioctl(ndev.tap_fd, TUNSETNOCSUM, 1) < 0) {
		pr_warning("Config tap device TUNSETNOCSUM error");
		goto fail;
	}

	hdr_len = sizeof(struct virtio_net_hdr);
	if (ioctl(ndev.tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0) {
		pr_warning("Config tap device TUNSETVNETHDRSZ error");
		goto fail;
	}

	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
	if (ioctl(ndev.tap_fd, TUNSETOFFLOAD, offload) < 0) {
		pr_warning("Config tap device TUNSETOFFLOAD error");
		goto fail;
	}

	if (strcmp(params->script, "none")) {
		pid = fork();
		if (pid == 0) {
			execl(params->script, params->script, ndev.tap_name, NULL);
			_exit(1);
		} else {
			waitpid(pid, &status, 0);
			if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
				pr_warning("Failed to set up tap by %s", params->script);
				goto fail;
			}
		}
	} else {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name));
		sin.sin_addr.s_addr = inet_addr(params->host_ip);
		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
		ifr.ifr_addr.sa_family = AF_INET;
		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
			pr_warning("Could not set ip address on tap device");
			goto fail;
		}
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name));
	ioctl(sock, SIOCGIFFLAGS, &ifr);
	ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
	if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
		pr_warning("Could not bring tap device up");

	close(sock);

	return 1;

fail:
	if (sock >= 0)
		close(sock);
	if (ndev.tap_fd >= 0)
		close(ndev.tap_fd);

	return 0;
}

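/*
 * Initialize the mutex/condvar pairs used by the RX and TX queues and start
 * both I/O threads, passing them the kvm handle.
 */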
static void virtio_net__io_thread_init(struct kvm *kvm)
{
	pthread_mutex_init(&ndev.io_rx_lock, NULL);
	pthread_cond_init(&ndev.io_rx_cond, NULL);

	pthread_mutex_init(&ndev.io_tx_lock, NULL);
	pthread_cond_init(&ndev.io_tx_cond, NULL);

	pthread_create(&ndev.io_rx_thread, NULL, virtio_net_rx_thread, (void *)kvm);
	pthread_create(&ndev.io_tx_thread, NULL, virtio_net_tx_thread, (void *)kvm);
}

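/*
 * Device bring-up: configure the tap backend, allocate an interrupt, register
 * a PIO region for the legacy virtio-pci registers and expose it through
 * BAR0, register the PCI device, start the I/O threads, and install one
 * ioeventfd per queue with datamatch on the queue index written to
 * VIRTIO_PCI_QUEUE_NOTIFY.
 *
 * A minimal sketch of a call site, assuming the builder fills in the
 * virtio_net_parameters fields referenced in this file (kvm, guest_mac,
 * host_ip, script); the actual struct layout lives in kvm/virtio-net.h:
 *
 *	struct virtio_net_parameters net_params = {
 *		.kvm		= kvm,
 *		.host_ip	= "192.168.33.1",
 *		.guest_mac	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *		.script		= "none",
 *	};
 *
 *	virtio_net__init(&net_params);
 */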
void virtio_net__init(const struct virtio_net_parameters *params)
{
	if (virtio_net__tap_init(params)) {
		u8 dev, line, pin;
		u16 net_base_addr;
		u64 i;
		struct ioevent ioevent;

		if (irq__register_device(VIRTIO_ID_NET, &dev, &pin, &line) < 0)
			return;

		pci_header.irq_pin	= pin;
		pci_header.irq_line	= line;
		net_base_addr		= ioport__register(IOPORT_EMPTY, &virtio_net_io_ops, IOPORT_SIZE, NULL);
		pci_header.bar[0]	= net_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
		ndev.base_addr		= net_base_addr;

		pci__register(&pci_header, dev);

		virtio_net__io_thread_init(params->kvm);

		for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
			ioevent = (struct ioevent) {
				.io_addr		= net_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
				.io_len			= sizeof(u16),
				.fn			= ioevent_callback,
				.datamatch		= i,
				.fn_ptr			= (void *)(long)i,
				.fn_kvm			= params->kvm,
				.fd			= eventfd(0, 0),
			};

			ioeventfd__add_event(&ioevent);
		}
	}
}