#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/irq.h"
#include "kvm/uip.h"
#include "kvm/ioeventfd.h"

#include <linux/virtio_net.h>
#include <linux/if_tun.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <assert.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>

#define VIRTIO_NET_QUEUE_SIZE		128
#define VIRTIO_NET_NUM_QUEUES		2
#define VIRTIO_NET_RX_QUEUE		0
#define VIRTIO_NET_TX_QUEUE		1

static struct pci_device_header pci_header = {
	.vendor_id		= PCI_VENDOR_ID_REDHAT_QUMRANET,
	.device_id		= PCI_DEVICE_ID_VIRTIO_NET,
	.header_type		= PCI_HEADER_TYPE_NORMAL,
	.revision_id		= 0,
	.class			= 0x020000,
	.subsys_vendor_id	= PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
	.subsys_id		= VIRTIO_ID_NET,
};

struct net_dev;

struct net_dev_operations {
	int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
	int (*tx)(struct iovec *iov, u16 in, struct net_dev *ndev);
};

struct net_dev {
	pthread_mutex_t			mutex;

	struct virt_queue		vqs[VIRTIO_NET_NUM_QUEUES];
	struct virtio_net_config	config;
	u32				host_features;
	u32				guest_features;
	u16				config_vector;
	u8				status;
	u8				isr;
	u16				queue_selector;
	u16				base_addr;

	pthread_t			io_rx_thread;
	pthread_mutex_t			io_rx_lock;
	pthread_cond_t			io_rx_cond;

	pthread_t			io_tx_thread;
	pthread_mutex_t			io_tx_lock;
	pthread_cond_t			io_tx_cond;

	int				tap_fd;
	char				tap_name[IFNAMSIZ];

	int				mode;

	struct uip_info			info;
	struct net_dev_operations	*ops;
};

static struct net_dev ndev = {
	.mutex	= PTHREAD_MUTEX_INITIALIZER,

	.config = {
		.mac		= {0x00, 0x15, 0x15, 0x15, 0x15, 0x15},
		.status		= VIRTIO_NET_S_LINK_UP,
	},
	.host_features		= 1UL << VIRTIO_NET_F_MAC
				| 1UL << VIRTIO_NET_F_CSUM
				| 1UL << VIRTIO_NET_F_HOST_UFO
				| 1UL << VIRTIO_NET_F_HOST_TSO4
				| 1UL << VIRTIO_NET_F_HOST_TSO6
				| 1UL << VIRTIO_NET_F_GUEST_UFO
				| 1UL << VIRTIO_NET_F_GUEST_TSO4
				| 1UL << VIRTIO_NET_F_GUEST_TSO6,
	.info = {
		.host_mac.addr	= {0x00, 0x01, 0x01, 0x01, 0x01, 0x01},
		.guest_mac.addr	= {0x00, 0x15, 0x15, 0x15, 0x15, 0x15},
		.host_ip	= 0xc0a82101,
		.buf_nr		= 20,
	}
};

static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_RX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_rx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_rx_cond, &ndev.io_rx_lock);
		mutex_unlock(&ndev.io_rx_lock);

		while (virt_queue__available(vq)) {
			head	= virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len	= ndev.ops->rx(iov, in, &ndev);
			virt_queue__set_used_elem(vq, head, len);

			/* We should interrupt the guest right now, otherwise latency is huge. */
			virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
		}
	}

	pthread_exit(NULL);
	return NULL;
}

static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	u16 out, in;
	u16 head;
	int len;

	kvm	= p;
	vq	= &ndev.vqs[VIRTIO_NET_TX_QUEUE];

	while (1) {
		mutex_lock(&ndev.io_tx_lock);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev.io_tx_cond, &ndev.io_tx_lock);
		mutex_unlock(&ndev.io_tx_lock);

		while (virt_queue__available(vq)) {
			head	= virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len	= ndev.ops->tx(iov, out, &ndev);
			virt_queue__set_used_elem(vq, head, len);
		}

		virt_queue__trigger_irq(vq, pci_header.irq_line, &ndev.isr, kvm);
	}

	pthread_exit(NULL);
	return NULL;
}

static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offset, int size, u32 count)
{
	u8 *config_space = (u8 *)&ndev.config;

	if (size != 1 || count != 1)
		return false;

	if ((offset - VIRTIO_MSI_CONFIG_VECTOR) >= sizeof(struct virtio_net_config))
		pr_error("config offset is too big: %lu", offset - VIRTIO_MSI_CONFIG_VECTOR);

	ioport__write8(data, config_space[offset - VIRTIO_MSI_CONFIG_VECTOR]);

	return true;
}

static bool virtio_net_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long offset = port - ndev.base_addr;
	bool ret = true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		ioport__write32(data, ndev.host_features);
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		ret = false;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		ioport__write32(data, ndev.vqs[ndev.queue_selector].pfn);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		ioport__write16(data, VIRTIO_NET_QUEUE_SIZE);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
	case VIRTIO_PCI_QUEUE_NOTIFY:
		ret = false;
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, ndev.status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, ndev.isr);
		kvm__irq_line(kvm, pci_header.irq_line, VIRTIO_IRQ_LOW);
		ndev.isr = VIRTIO_IRQ_LOW;
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ioport__write16(data, ndev.config_vector);
		break;
	default:
		ret = virtio_net_pci_io_device_specific_in(data, offset, size, count);
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

static void virtio_net_handle_callback(struct kvm *kvm, u16 queue_index)
{
	switch (queue_index) {
	case VIRTIO_NET_TX_QUEUE:
		mutex_lock(&ndev.io_tx_lock);
		pthread_cond_signal(&ndev.io_tx_cond);
		mutex_unlock(&ndev.io_tx_lock);
		break;
	case VIRTIO_NET_RX_QUEUE:
		mutex_lock(&ndev.io_rx_lock);
		pthread_cond_signal(&ndev.io_rx_cond);
		mutex_unlock(&ndev.io_rx_lock);
		break;
	default:
		pr_warning("Unknown queue index %u", queue_index);
	}
}

static bool virtio_net_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long offset = port - ndev.base_addr;
	bool ret = true;

	mutex_lock(&ndev.mutex);

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		ndev.guest_features = ioport__read32(data);
		break;
	case VIRTIO_PCI_QUEUE_PFN: {
		struct virt_queue *queue;
		void *p;

		assert(ndev.queue_selector < VIRTIO_NET_NUM_QUEUES);

		queue		= &ndev.vqs[ndev.queue_selector];
		queue->pfn	= ioport__read32(data);
		p		= guest_pfn_to_host(kvm, queue->pfn);

		vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

		break;
	}
	case VIRTIO_PCI_QUEUE_SEL:
		ndev.queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY: {
		u16 queue_index;

		queue_index = ioport__read16(data);
		virtio_net_handle_callback(kvm, queue_index);
		break;
	}
	case VIRTIO_PCI_STATUS:
		ndev.status = ioport__read8(data);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ndev.config_vector = VIRTIO_MSI_NO_VECTOR;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		break;
	default:
		ret = false;
	}

	mutex_unlock(&ndev.mutex);

	return ret;
}

static void ioevent_callback(struct kvm *kvm, void *param)
{
	virtio_net_handle_callback(kvm, (u64)(long)param);
}

static struct ioport_operations virtio_net_io_ops = {
	.io_in	= virtio_net_pci_io_in,
	.io_out	= virtio_net_pci_io_out,
};

static bool virtio_net__tap_init(const struct virtio_net_parameters *params)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int i, pid, status, offload, hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;

	for (i = 0; i < 6; i++)
		ndev.config.mac[i] = params->guest_mac[i];

	ndev.tap_fd = open("/dev/net/tun", O_RDWR);
	if (ndev.tap_fd < 0) {
		pr_warning("Unable to open /dev/net/tun");
		goto fail;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(ndev.tap_fd, TUNSETIFF, &ifr) < 0) {
		pr_warning("Config tap device error. Are you root?");
Are you root?"); 3383b02f580SSasha Levin goto fail; 3393b02f580SSasha Levin } 3404f56d42cSAsias He 341c229370aSIngo Molnar strncpy(ndev.tap_name, ifr.ifr_name, sizeof(ndev.tap_name)); 3424f56d42cSAsias He 343c229370aSIngo Molnar if (ioctl(ndev.tap_fd, TUNSETNOCSUM, 1) < 0) { 3444542f276SCyrill Gorcunov pr_warning("Config tap device TUNSETNOCSUM error"); 345246c8347SAsias He goto fail; 346246c8347SAsias He } 347246c8347SAsias He 348246c8347SAsias He hdr_len = sizeof(struct virtio_net_hdr); 349c229370aSIngo Molnar if (ioctl(ndev.tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0) { 3504542f276SCyrill Gorcunov pr_warning("Config tap device TUNSETVNETHDRSZ error"); 351246c8347SAsias He goto fail; 352246c8347SAsias He } 353246c8347SAsias He 354246c8347SAsias He offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO; 355c229370aSIngo Molnar if (ioctl(ndev.tap_fd, TUNSETOFFLOAD, offload) < 0) { 3564542f276SCyrill Gorcunov pr_warning("Config tap device TUNSETOFFLOAD error"); 357246c8347SAsias He goto fail; 358246c8347SAsias He } 3594f56d42cSAsias He 36073b7d038SAmos Kong if (strcmp(params->script, "none")) { 36173b7d038SAmos Kong pid = fork(); 36273b7d038SAmos Kong if (pid == 0) { 363c229370aSIngo Molnar execl(params->script, params->script, ndev.tap_name, NULL); 36473b7d038SAmos Kong _exit(1); 36573b7d038SAmos Kong } else { 36673b7d038SAmos Kong waitpid(pid, &status, 0); 36773b7d038SAmos Kong if (WIFEXITED(status) && WEXITSTATUS(status) != 0) { 3684542f276SCyrill Gorcunov pr_warning("Fail to setup tap by %s", params->script); 36973b7d038SAmos Kong goto fail; 37073b7d038SAmos Kong } 37173b7d038SAmos Kong } 37273b7d038SAmos Kong } else { 373cb7202c1SSasha Levin memset(&ifr, 0, sizeof(ifr)); 374c229370aSIngo Molnar strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name)); 375bdfcfca6SSasha Levin sin.sin_addr.s_addr = inet_addr(params->host_ip); 376cb7202c1SSasha Levin memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr)); 377cb7202c1SSasha Levin ifr.ifr_addr.sa_family = AF_INET; 3783b02f580SSasha Levin if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) { 3794542f276SCyrill Gorcunov pr_warning("Could not set ip address on tap device"); 3803b02f580SSasha Levin goto fail; 3813b02f580SSasha Levin } 38273b7d038SAmos Kong } 383cb7202c1SSasha Levin 384cb7202c1SSasha Levin memset(&ifr, 0, sizeof(ifr)); 385c229370aSIngo Molnar strncpy(ifr.ifr_name, ndev.tap_name, sizeof(ndev.tap_name)); 386cb7202c1SSasha Levin ioctl(sock, SIOCGIFFLAGS, &ifr); 387cb7202c1SSasha Levin ifr.ifr_flags |= IFF_UP | IFF_RUNNING; 388cb7202c1SSasha Levin if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0) 3894542f276SCyrill Gorcunov pr_warning("Could not bring tap device up"); 390cb7202c1SSasha Levin 391cb7202c1SSasha Levin close(sock); 3923b02f580SSasha Levin 3933b02f580SSasha Levin return 1; 3943b02f580SSasha Levin 3953b02f580SSasha Levin fail: 3963b02f580SSasha Levin if (sock >= 0) 3973b02f580SSasha Levin close(sock); 398c229370aSIngo Molnar if (ndev.tap_fd >= 0) 399c229370aSIngo Molnar close(ndev.tap_fd); 4003b02f580SSasha Levin 4013b02f580SSasha Levin return 0; 4024f56d42cSAsias He } 4034f56d42cSAsias He 40443835ac9SSasha Levin static void virtio_net__io_thread_init(struct kvm *kvm) 405c4aa7c02SPekka Enberg { 406c229370aSIngo Molnar pthread_mutex_init(&ndev.io_rx_lock, NULL); 407c229370aSIngo Molnar pthread_cond_init(&ndev.io_tx_cond, NULL); 408c4aa7c02SPekka Enberg 409c229370aSIngo Molnar pthread_mutex_init(&ndev.io_rx_lock, NULL); 410c229370aSIngo Molnar pthread_cond_init(&ndev.io_tx_cond, NULL); 411c4aa7c02SPekka Enberg 412c229370aSIngo Molnar 

	pthread_create(&ndev.io_rx_thread, NULL, virtio_net_rx_thread, (void *)kvm);
	pthread_create(&ndev.io_tx_thread, NULL, virtio_net_tx_thread, (void *)kvm);
}

static inline int tap_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return writev(ndev->tap_fd, iov, out);
}

static inline int tap_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return readv(ndev->tap_fd, iov, in);
}

static inline int uip_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return uip_tx(iov, out, &ndev->info);
}

static inline int uip_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return uip_rx(iov, in, &ndev->info);
}

static struct net_dev_operations tap_ops = {
	.rx	= tap_ops_rx,
	.tx	= tap_ops_tx,
};

static struct net_dev_operations uip_ops = {
	.rx	= uip_ops_rx,
	.tx	= uip_ops_tx,
};

void virtio_net__init(const struct virtio_net_parameters *params)
{
	struct ioevent ioevent;
	u8 dev, line, pin;
	u16 net_base_addr;
	int i;

	if (irq__register_device(VIRTIO_ID_NET, &dev, &pin, &line) < 0)
		return;

	pci_header.irq_pin	= pin;
	pci_header.irq_line	= line;
	net_base_addr		= ioport__register(IOPORT_EMPTY, &virtio_net_io_ops, IOPORT_SIZE, NULL);
	pci_header.bar[0]	= net_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
	ndev.base_addr		= net_base_addr;
	pci__register(&pci_header, dev);

	ndev.mode = params->mode;
	if (ndev.mode == NET_MODE_TAP) {
		virtio_net__tap_init(params);
		ndev.ops = &tap_ops;
	} else {
		uip_init(&ndev.info);
		ndev.ops = &uip_ops;
	}

	virtio_net__io_thread_init(params->kvm);

	/* Register one ioeventfd per queue on the QUEUE_NOTIFY port, matched on the queue index. */
	for (i = 0; i < VIRTIO_NET_NUM_QUEUES; i++) {
		ioevent = (struct ioevent) {
			.io_addr	= net_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
			.io_len		= sizeof(u16),
			.fn		= ioevent_callback,
			.datamatch	= i,
			.fn_ptr		= (void *)(long)i,
			.fn_kvm		= params->kvm,
			.fd		= eventfd(0, 0),
		};

		ioeventfd__add_event(&ioevent);
	}
}