#include "kvm/virtio-pci.h"

#include "kvm/ioport.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/virtio-pci-dev.h"
#include "kvm/irq.h"
#include "kvm/virtio.h"
#include "kvm/ioeventfd.h"

#include <sys/ioctl.h>
#include <linux/virtio_pci.h>
#include <linux/byteorder.h>
#include <string.h>

static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param)
{
	struct virtio_pci_ioevent_param *ioeventfd = param;
	struct virtio_pci *vpci = ioeventfd->vdev->virtio;

	ioeventfd->vdev->ops->notify_vq(kvm, vpci->dev, ioeventfd->vq);
}
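/*
 * Register an ioeventfd for a queue's notify register so that a guest
 * write to VIRTIO_PCI_QUEUE_NOTIFY is turned into an eventfd signal by
 * KVM instead of a regular I/O exit. The event is registered twice,
 * once for the port I/O BAR and once for its MMIO mirror. When the
 * device is backed by vhost, the eventfd is handed to the backend and
 * polled in the kernel; otherwise it is polled in userspace.
 */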
static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct ioevent ioevent;
	struct virtio_pci *vpci = vdev->virtio;
	int i, r, flags = 0;
	int fds[2];

	vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) {
		.vdev	= vdev,
		.vq	= vq,
	};

	ioevent = (struct ioevent) {
		.fn		= virtio_pci__ioevent_callback,
		.fn_ptr		= &vpci->ioeventfds[vq],
		.datamatch	= vq,
		.fn_kvm		= kvm,
	};

	/*
	 * Vhost will poll the eventfd in host kernel side, otherwise we
	 * need to poll in userspace.
	 */
	if (!vdev->use_vhost)
		flags |= IOEVENTFD_FLAG_USER_POLL;

	/* ioport */
	ioevent.io_addr	= vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY;
	ioevent.io_len	= sizeof(u16);
	ioevent.fd	= fds[0] = eventfd(0, 0);
	r = ioeventfd__add_event(&ioevent, flags | IOEVENTFD_FLAG_PIO);
	if (r)
		return r;

	/* mmio */
	ioevent.io_addr	= vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY;
	ioevent.io_len	= sizeof(u16);
	ioevent.fd	= fds[1] = eventfd(0, 0);
	r = ioeventfd__add_event(&ioevent, flags);
	if (r)
		goto free_ioport_evt;

	if (vdev->ops->notify_vq_eventfd)
		for (i = 0; i < 2; ++i)
			vdev->ops->notify_vq_eventfd(kvm, vpci->dev, vq,
						     fds[i]);
	return 0;

free_ioport_evt:
	ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, vq);
	return r;
}

static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci)
{
	return vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_ENABLE);
}

static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_device *vdev, u16 port,
					void *data, int size, int offset)
{
	u32 config_offset;
	struct virtio_pci *vpci = vdev->virtio;
	int type = virtio__get_dev_specific_field(offset - 20,
						  virtio_pci__msix_enabled(vpci),
						  &config_offset);
	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			ioport__write16(data, vpci->config_vector);
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			ioport__write16(data, vpci->vq_vector[vpci->queue_selector]);
			break;
		};

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		u8 cfg;

		cfg = vdev->ops->get_config(kvm, vpci->dev)[config_offset];
		ioport__write8(data, cfg);
		return true;
	}

	return false;
}
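/*
 * Guest read of the legacy virtio-pci configuration space (BAR0 or its
 * MMIO mirror). Registers of the common header are decoded here;
 * anything beyond it is handled by virtio_pci__specific_io_in(), which
 * separates the MSI-X vector registers from device-specific config
 * depending on whether MSI-X is enabled (the common header is 20 bytes,
 * so device config starts at offset 20, or 24 with MSI-X).
 */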
static bool virtio_pci__io_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_device *vdev;
	struct virtio_pci *vpci;
	struct kvm *kvm;
	u32 val;

	kvm = vcpu->kvm;
	vdev = ioport->priv;
	vpci = vdev->virtio;
	offset = port - vpci->port_addr;

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		val = vdev->ops->get_host_features(kvm, vpci->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = vdev->ops->get_pfn_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		val = vdev->ops->get_size_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write16(data, val);
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, vpci->status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, vpci->isr);
		kvm__irq_line(kvm, vpci->legacy_irq_line, VIRTIO_IRQ_LOW);
		vpci->isr = VIRTIO_IRQ_LOW;
		break;
	default:
		ret = virtio_pci__specific_io_in(kvm, vdev, port, data, size, offset);
		break;
	};

	return ret;
}

static void update_msix_map(struct virtio_pci *vpci,
			    struct msix_table *msix_entry, u32 vecnum)
{
	u32 gsi, i;

	/* Find the GSI number used for that vector */
	if (vecnum == vpci->config_vector) {
		gsi = vpci->config_gsi;
	} else {
		for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++)
			if (vpci->vq_vector[i] == vecnum)
				break;
		if (i == VIRTIO_PCI_MAX_VQ)
			return;
		gsi = vpci->gsis[i];
	}

	if (gsi == 0)
		return;

	msix_entry = &msix_entry[vecnum];
	irq__update_msix_route(vpci->kvm, gsi, &msix_entry->msg);
}
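/*
 * Guest write to the MSI-X vector registers or to device-specific
 * config. Programming a config or queue vector allocates a GSI route
 * for the corresponding MSI-X table entry via irq__add_msix_route();
 * if the kernel lacks IRQ routing but supports KVM_SIGNAL_MSI, the
 * -ENXIO result is ignored and MSIs are injected directly instead.
 */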
static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_device *vdev, u16 port,
					void *data, int size, int offset)
{
	struct virtio_pci *vpci = vdev->virtio;
	u32 config_offset, vec;
	int gsi;
	int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci),
						  &config_offset);
	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			vec = vpci->config_vector = ioport__read16(data);
			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm,
						  &vpci->msix_table[vec].msg,
						  vpci->dev_hdr.dev_num << 3);
			/*
			 * We don't need IRQ routing if we can use
			 * MSI injection via the KVM_SIGNAL_MSI ioctl.
			 */
			if (gsi == -ENXIO &&
			    vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
				break;

			if (gsi < 0) {
				die("failed to configure MSIs");
				break;
			}

			vpci->config_gsi = gsi;
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			vec = ioport__read16(data);
			vpci->vq_vector[vpci->queue_selector] = vec;

			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm,
						  &vpci->msix_table[vec].msg,
						  vpci->dev_hdr.dev_num << 3);
			/*
			 * We don't need IRQ routing if we can use
			 * MSI injection via the KVM_SIGNAL_MSI ioctl.
			 */
			if (gsi == -ENXIO &&
			    vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
				break;

			if (gsi < 0) {
				die("failed to configure MSIs");
				break;
			}

			vpci->gsis[vpci->queue_selector] = gsi;
			if (vdev->ops->notify_vq_gsi)
				vdev->ops->notify_vq_gsi(kvm, vpci->dev,
							 vpci->queue_selector,
							 gsi);
			break;
		};

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		vdev->ops->get_config(kvm, vpci->dev)[config_offset] = *(u8 *)data;

		return true;
	}

	return false;
}
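/*
 * Guest write to the legacy configuration space. Writing QUEUE_PFN both
 * sets up the ring (init_vq) and wires the notify ioeventfd for the
 * selected queue; writing 0 to STATUS is a device reset and is used to
 * sample the vcpu's endianness for subsequent ring accesses.
 */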
static bool virtio_pci__io_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_device *vdev;
	struct virtio_pci *vpci;
	struct kvm *kvm;
	u32 val;

	kvm = vcpu->kvm;
	vdev = ioport->priv;
	vpci = vdev->virtio;
	offset = port - vpci->port_addr;

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		val = ioport__read32(data);
		virtio_set_guest_features(kvm, vdev, vpci->dev, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = ioport__read32(data);
		virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector);
		vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
				   1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
				   VIRTIO_PCI_VRING_ALIGN, val);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		vpci->queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		val = ioport__read16(data);
		vdev->ops->notify_vq(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_STATUS:
		vpci->status = ioport__read8(data);
		if (!vpci->status) /* Sample endianness on reset */
			vdev->endian = kvm_cpu__get_endianness(vcpu);
		if (vdev->ops->notify_status)
			vdev->ops->notify_status(kvm, vpci->dev, vpci->status);
		break;
	default:
		ret = virtio_pci__specific_io_out(kvm, vdev, port, data, size, offset);
		break;
	};

	return ret;
}

static struct ioport_operations virtio_pci__io_ops = {
	.io_in	= virtio_pci__io_in,
	.io_out	= virtio_pci__io_out,
};

static void virtio_pci__msix_mmio_callback(struct kvm_cpu *vcpu,
					   u64 addr, u8 *data, u32 len,
					   u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	struct msix_table *table;
	int vecnum;
	size_t offset;

	if (addr > vpci->msix_io_block + PCI_IO_SIZE) {
		if (is_write)
			return;
		table  = (struct msix_table *)&vpci->msix_pba;
		offset = addr - (vpci->msix_io_block + PCI_IO_SIZE);
	} else {
		table  = vpci->msix_table;
		offset = addr - vpci->msix_io_block;
	}
	vecnum = offset / sizeof(struct msix_table);
	offset = offset % sizeof(struct msix_table);

	if (!is_write) {
		memcpy(data, (void *)&table[vecnum] + offset, len);
		return;
	}

	memcpy((void *)&table[vecnum] + offset, data, len);

	/* Did we just update the address or payload? */
	if (offset < offsetof(struct msix_table, ctrl))
		update_msix_map(vpci, table, vecnum);
}
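/*
 * Inject an MSI for the given MSI-X table entry with the KVM_SIGNAL_MSI
 * ioctl, bypassing GSI routing. Some irqchips (for instance GICv3 ITS
 * based systems) need a device ID for MSI injection; it is derived here
 * from the device's PCI device number.
 */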
static void virtio_pci__signal_msi(struct kvm *kvm, struct virtio_pci *vpci,
				   int vec)
{
	struct kvm_msi msi = {
		.address_lo = vpci->msix_table[vec].msg.address_lo,
		.address_hi = vpci->msix_table[vec].msg.address_hi,
		.data = vpci->msix_table[vec].msg.data,
	};

	if (kvm->msix_needs_devid) {
		msi.flags = KVM_MSI_VALID_DEVID;
		msi.devid = vpci->dev_hdr.dev_num << 3;
	}

	irq__signal_msi(kvm, &msi);
}

int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->vq_vector[vq];

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, vpci->vq_vector[vq]);
		else
			kvm__irq_trigger(kvm, vpci->gsis[vq]);
	} else {
		vpci->isr = VIRTIO_IRQ_HIGH;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}
	return 0;
}

int virtio_pci__signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->config_vector;

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, tbl);
		else
			kvm__irq_trigger(kvm, vpci->config_gsi);
	} else {
		vpci->isr = VIRTIO_PCI_ISR_CONFIG;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}

	return 0;
}
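/*
 * BAR1 is an MMIO mirror of the legacy I/O port BAR: accesses are
 * translated to the equivalent port offset and fed through the same
 * port I/O emulation path, so guests that do not use port I/O can
 * still reach the registers.
 */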
static void virtio_pci__io_mmio_callback(struct kvm_cpu *vcpu,
					 u64 addr, u8 *data, u32 len,
					 u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	int direction = is_write ? KVM_EXIT_IO_OUT : KVM_EXIT_IO_IN;
	u16 port = vpci->port_addr + (addr & (IOPORT_SIZE - 1));

	kvm__emulate_io(vcpu, port, data, direction, len, 1);
}
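/*
 * Set up the PCI presence of a virtio device: BAR0 is the legacy I/O
 * port window, BAR1 an MMIO mirror of the same registers, and BAR2
 * holds the MSI-X table in its first half and the PBA in its second
 * half. The MSI-X capability advertises one vector per virtqueue plus
 * the config vectors, encoded as N-1 as the PCI spec requires.
 */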
int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		     int device_id, int subsys_id, int class)
{
	struct virtio_pci *vpci = vdev->virtio;
	int r;

	vpci->kvm = kvm;
	vpci->dev = dev;

	r = ioport__register(kvm, IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vdev);
	if (r < 0)
		return r;
	vpci->port_addr = (u16)r;

	vpci->mmio_addr = pci_get_io_space_block(IOPORT_SIZE);
	r = kvm__register_mmio(kvm, vpci->mmio_addr, IOPORT_SIZE, false,
			       virtio_pci__io_mmio_callback, vpci);
	if (r < 0)
		goto free_ioport;

	vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE * 2);
	r = kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE * 2, false,
			       virtio_pci__msix_mmio_callback, vpci);
	if (r < 0)
		goto free_mmio;

	vpci->pci_hdr = (struct pci_device_header) {
		.vendor_id		= cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET),
		.device_id		= cpu_to_le16(device_id),
		.command		= PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
		.header_type		= PCI_HEADER_TYPE_NORMAL,
		.revision_id		= 0,
		.class[0]		= class & 0xff,
		.class[1]		= (class >> 8) & 0xff,
		.class[2]		= (class >> 16) & 0xff,
		.subsys_vendor_id	= cpu_to_le16(PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET),
		.subsys_id		= cpu_to_le16(subsys_id),
		.bar[0]			= cpu_to_le32(vpci->port_addr
						      | PCI_BASE_ADDRESS_SPACE_IO),
		.bar[1]			= cpu_to_le32(vpci->mmio_addr
						      | PCI_BASE_ADDRESS_SPACE_MEMORY),
		.bar[2]			= cpu_to_le32(vpci->msix_io_block
						      | PCI_BASE_ADDRESS_SPACE_MEMORY),
		.status			= cpu_to_le16(PCI_STATUS_CAP_LIST),
		.capabilities		= (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
		.bar_size[0]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[1]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[2]		= cpu_to_le32(PCI_IO_SIZE*2),
	};

	vpci->dev_hdr = (struct device_header) {
		.bus_type	= DEVICE_BUS_PCI,
		.data		= &vpci->pci_hdr,
	};

	vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX;
	vpci->pci_hdr.msix.next = 0;
	/*
	 * We at most have VIRTIO_PCI_MAX_VQ entries for virt queue,
	 * VIRTIO_PCI_MAX_CONFIG entries for config.
	 *
	 * To quote the PCI spec:
	 *
	 * System software reads this field to determine the
	 * MSI-X Table Size N, which is encoded as N-1.
	 * For example, a returned value of "00000000011"
	 * indicates a table size of 4.
	 */
	vpci->pci_hdr.msix.ctrl = cpu_to_le16(VIRTIO_PCI_MAX_VQ + VIRTIO_PCI_MAX_CONFIG - 1);

	/* Both table and PBA are mapped to the same BAR (2) */
	vpci->pci_hdr.msix.table_offset = cpu_to_le32(2);
	vpci->pci_hdr.msix.pba_offset = cpu_to_le32(2 | PCI_IO_SIZE);
	vpci->config_vector = 0;

	if (irq__can_signal_msi(kvm))
		vpci->features |= VIRTIO_PCI_F_SIGNAL_MSI;

	r = device__register(&vpci->dev_hdr);
	if (r < 0)
		goto free_msix_mmio;

	/* save the IRQ that device__register() has allocated */
	vpci->legacy_irq_line = vpci->pci_hdr.irq_line;

	return 0;

free_msix_mmio:
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
free_mmio:
	kvm__deregister_mmio(kvm, vpci->mmio_addr);
free_ioport:
	ioport__unregister(kvm, vpci->port_addr);
	return r;
}

int virtio_pci__exit(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int i;

	kvm__deregister_mmio(kvm, vpci->mmio_addr);
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
	ioport__unregister(kvm, vpci->port_addr);

	for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++) {
		ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
		ioeventfd__del_event(vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
	}

	return 0;
}