#include "kvm/virtio-pci.h"

#include "kvm/ioport.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/virtio-pci-dev.h"
#include "kvm/irq.h"
#include "kvm/virtio.h"
#include "kvm/ioeventfd.h"

#include <sys/ioctl.h>
#include <linux/virtio_pci.h>
#include <linux/byteorder.h>
#include <string.h>

static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param)
{
	struct virtio_pci_ioevent_param *ioeventfd = param;
	struct virtio_pci *vpci = ioeventfd->vdev->virtio;

	ioeventfd->vdev->ops->notify_vq(kvm, vpci->dev, ioeventfd->vq);
}
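/*
 * Register an ioeventfd for the QUEUE_NOTIFY register of virtqueue @vq at
 * both of its locations: the legacy I/O port BAR and its MMIO mirror. The
 * eventfds are either polled in userspace, triggering the callback above,
 * or handed to the backend (e.g. vhost) through notify_vq_eventfd().
 */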
static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct ioevent ioevent;
	struct virtio_pci *vpci = vdev->virtio;
	int i, r, flags = 0;
	int fds[2];

	vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) {
		.vdev		= vdev,
		.vq		= vq,
	};

	ioevent = (struct ioevent) {
		.fn		= virtio_pci__ioevent_callback,
		.fn_ptr		= &vpci->ioeventfds[vq],
		.datamatch	= vq,
		.fn_kvm		= kvm,
	};

	/*
	 * Vhost will poll the eventfd in host kernel side, otherwise we
	 * need to poll in userspace.
	 */
	if (!vdev->use_vhost)
		flags |= IOEVENTFD_FLAG_USER_POLL;

	/* ioport */
	ioevent.io_addr	= vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY;
	ioevent.io_len	= sizeof(u16);
	ioevent.fd	= fds[0] = eventfd(0, 0);
	r = ioeventfd__add_event(&ioevent, flags | IOEVENTFD_FLAG_PIO);
	if (r)
		return r;

	/* mmio */
	ioevent.io_addr	= vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY;
	ioevent.io_len	= sizeof(u16);
	ioevent.fd	= fds[1] = eventfd(0, 0);
	r = ioeventfd__add_event(&ioevent, flags);
	if (r)
		goto free_ioport_evt;

	if (vdev->ops->notify_vq_eventfd)
		for (i = 0; i < 2; ++i)
			vdev->ops->notify_vq_eventfd(kvm, vpci->dev, vq,
						     fds[i]);
	return 0;

free_ioport_evt:
	ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, vq);
	return r;
}

static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci)
{
	return vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_ENABLE);
}
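/*
 * Reads beyond the standard virtio PCI registers land here:
 * virtio__get_dev_specific_field() classifies the offset (relative to the
 * 20-byte legacy header) as either an MSI-X vector register, when MSI-X is
 * enabled, or a byte of the device-specific configuration space.
 */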
static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_device *vdev, u16 port,
					void *data, int size, int offset)
{
	u32 config_offset;
	struct virtio_pci *vpci = vdev->virtio;
	int type = virtio__get_dev_specific_field(offset - 20,
						  virtio_pci__msix_enabled(vpci),
						  &config_offset);
	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			ioport__write16(data, vpci->config_vector);
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			ioport__write16(data, vpci->vq_vector[vpci->queue_selector]);
			break;
		};

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		u8 cfg;

		cfg = vdev->ops->get_config(kvm, vpci->dev)[config_offset];
		ioport__write8(data, cfg);
		return true;
	}

	return false;
}

static bool virtio_pci__io_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_device *vdev;
	struct virtio_pci *vpci;
	struct kvm *kvm;
	u32 val;

	kvm = vcpu->kvm;
	vdev = ioport->priv;
	vpci = vdev->virtio;
	offset = port - vpci->port_addr;

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		val = vdev->ops->get_host_features(kvm, vpci->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = vdev->ops->get_pfn_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		val = vdev->ops->get_size_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write16(data, val);
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, vpci->status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, vpci->isr);
		kvm__irq_line(kvm, vpci->legacy_irq_line, VIRTIO_IRQ_LOW);
		vpci->isr = VIRTIO_IRQ_LOW;
		break;
	default:
		ret = virtio_pci__specific_io_in(kvm, vdev, port, data, size, offset);
		break;
	};

	return ret;
}

static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_device *vdev, u16 port,
					void *data, int size, int offset)
{
	struct virtio_pci *vpci = vdev->virtio;
	u32 config_offset, gsi, vec;
	int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci),
						  &config_offset);
	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			vec = vpci->config_vector = ioport__read16(data);
			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg);

			vpci->config_gsi = gsi;
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data);

			if (vec == VIRTIO_MSI_NO_VECTOR)
				break;

			gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg);
			vpci->gsis[vpci->queue_selector] = gsi;
			if (vdev->ops->notify_vq_gsi)
				vdev->ops->notify_vq_gsi(kvm, vpci->dev,
							 vpci->queue_selector, gsi);
			break;
		};

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		vdev->ops->get_config(kvm, vpci->dev)[config_offset] = *(u8 *)data;

		return true;
	}

	return false;
}
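/*
 * Writes to the legacy virtio PCI registers: feature bits, queue selection
 * and notification, and device status. Writing a queue PFN registers the
 * queue's ioeventfds and initializes the vring; writing a status of zero
 * (reset) is also where the guest's endianness is sampled.
 */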
static bool virtio_pci__io_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_device *vdev;
	struct virtio_pci *vpci;
	struct kvm *kvm;
	u32 val;

	kvm = vcpu->kvm;
	vdev = ioport->priv;
	vpci = vdev->virtio;
	offset = port - vpci->port_addr;

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		val = ioport__read32(data);
		vdev->ops->set_guest_features(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = ioport__read32(data);
		virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector);
		vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
				   1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
				   VIRTIO_PCI_VRING_ALIGN, val);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		vpci->queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		val = ioport__read16(data);
		vdev->ops->notify_vq(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_STATUS:
		vpci->status = ioport__read8(data);
		if (!vpci->status) /* Sample endianness on reset */
			vdev->endian = kvm_cpu__get_endianness(vcpu);
		if (vdev->ops->notify_status)
			vdev->ops->notify_status(kvm, vpci->dev, vpci->status);
		break;
	default:
		ret = virtio_pci__specific_io_out(kvm, vdev, port, data, size, offset);
		break;
	};

	return ret;
}

static struct ioport_operations virtio_pci__io_ops = {
	.io_in	= virtio_pci__io_in,
	.io_out	= virtio_pci__io_out,
};
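/*
 * BAR 2 is split into two PCI_IO_SIZE halves: the MSI-X table in the first
 * half and the pending bit array (PBA) in the second. Accesses are served
 * by copying to or from the copies kept in struct virtio_pci.
 */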
static void virtio_pci__msix_mmio_callback(struct kvm_cpu *vcpu,
					   u64 addr, u8 *data, u32 len,
					   u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	void *table;
	u32 offset;

	if (addr >= vpci->msix_io_block + PCI_IO_SIZE) {
		table = &vpci->msix_pba;
		offset = vpci->msix_io_block + PCI_IO_SIZE;
	} else {
		table = &vpci->msix_table;
		offset = vpci->msix_io_block;
	}

	if (is_write)
		memcpy(table + addr - offset, data, len);
	else
		memcpy(data, table + addr - offset, len);
}

static void virtio_pci__signal_msi(struct kvm *kvm, struct virtio_pci *vpci, int vec)
{
	struct kvm_msi msi = {
		.address_lo = vpci->msix_table[vec].msg.address_lo,
		.address_hi = vpci->msix_table[vec].msg.address_hi,
		.data = vpci->msix_table[vec].msg.data,
	};

	ioctl(kvm->vm_fd, KVM_SIGNAL_MSI, &msi);
}
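/*
 * Inject a virtqueue interrupt into the guest. With MSI-X enabled and a
 * valid vector, a masked vector (globally or per entry) only sets its
 * pending bit; otherwise the MSI is delivered through KVM_SIGNAL_MSI when
 * supported, or by triggering the GSI routed for the vector. Without MSI-X
 * the legacy INTx line is raised and the ISR status is updated.
 * virtio_pci__signal_config() below follows the same logic for
 * configuration-change interrupts.
 */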
int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->vq_vector[vq];

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, vpci->vq_vector[vq]);
		else
			kvm__irq_trigger(kvm, vpci->gsis[vq]);
	} else {
		vpci->isr = VIRTIO_IRQ_HIGH;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}
	return 0;
}

int virtio_pci__signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int tbl = vpci->config_vector;

	if (virtio_pci__msix_enabled(vpci) && tbl != VIRTIO_MSI_NO_VECTOR) {
		if (vpci->pci_hdr.msix.ctrl & cpu_to_le16(PCI_MSIX_FLAGS_MASKALL) ||
		    vpci->msix_table[tbl].ctrl & cpu_to_le16(PCI_MSIX_ENTRY_CTRL_MASKBIT)) {

			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		if (vpci->features & VIRTIO_PCI_F_SIGNAL_MSI)
			virtio_pci__signal_msi(kvm, vpci, tbl);
		else
			kvm__irq_trigger(kvm, vpci->config_gsi);
	} else {
		vpci->isr = VIRTIO_PCI_ISR_CONFIG;
		kvm__irq_trigger(kvm, vpci->legacy_irq_line);
	}

	return 0;
}

static void virtio_pci__io_mmio_callback(struct kvm_cpu *vcpu,
					 u64 addr, u8 *data, u32 len,
					 u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	int direction = is_write ? KVM_EXIT_IO_OUT : KVM_EXIT_IO_IN;
	u16 port = vpci->port_addr + (addr & (IOPORT_SIZE - 1));

	kvm__emulate_io(vcpu, port, data, direction, len, 1);
}
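/*
 * Register the virtio device on the PCI bus. BAR 0 is an MMIO window that
 * mirrors the legacy register block (virtio_pci__io_mmio_callback() bounces
 * accesses to the ioport handlers), BAR 1 is the I/O port region itself and
 * BAR 2 carries the MSI-X table and PBA.
 */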
int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		     int device_id, int subsys_id, int class)
{
	struct virtio_pci *vpci = vdev->virtio;
	int r;

	vpci->kvm = kvm;
	vpci->dev = dev;

	r = ioport__register(kvm, IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vdev);
	if (r < 0)
		return r;
	vpci->port_addr = (u16)r;

	vpci->mmio_addr = pci_get_io_space_block(IOPORT_SIZE);
	r = kvm__register_mmio(kvm, vpci->mmio_addr, IOPORT_SIZE, false,
			       virtio_pci__io_mmio_callback, vpci);
	if (r < 0)
		goto free_ioport;

	vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE * 2);
	r = kvm__register_mmio(kvm, vpci->msix_io_block, PCI_IO_SIZE * 2, false,
			       virtio_pci__msix_mmio_callback, vpci);
	if (r < 0)
		goto free_mmio;

	vpci->pci_hdr = (struct pci_device_header) {
		.vendor_id		= cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET),
		.device_id		= cpu_to_le16(device_id),
		.command		= PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
		.header_type		= PCI_HEADER_TYPE_NORMAL,
		.revision_id		= 0,
		.class[0]		= class & 0xff,
		.class[1]		= (class >> 8) & 0xff,
		.class[2]		= (class >> 16) & 0xff,
		.subsys_vendor_id	= cpu_to_le16(PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET),
		.subsys_id		= cpu_to_le16(subsys_id),
		.bar[0]			= cpu_to_le32(vpci->mmio_addr
						| PCI_BASE_ADDRESS_SPACE_MEMORY),
		.bar[1]			= cpu_to_le32(vpci->port_addr
						| PCI_BASE_ADDRESS_SPACE_IO),
		.bar[2]			= cpu_to_le32(vpci->msix_io_block
						| PCI_BASE_ADDRESS_SPACE_MEMORY),
		.status			= cpu_to_le16(PCI_STATUS_CAP_LIST),
		.capabilities		= (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
		.bar_size[0]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[1]		= cpu_to_le32(IOPORT_SIZE),
		.bar_size[2]		= cpu_to_le32(PCI_IO_SIZE*2),
	};

	vpci->dev_hdr = (struct device_header) {
		.bus_type	= DEVICE_BUS_PCI,
		.data		= &vpci->pci_hdr,
	};

	vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX;
	vpci->pci_hdr.msix.next = 0;
	/*
	 * We have at most VIRTIO_PCI_MAX_VQ entries for virt queues and
	 * VIRTIO_PCI_MAX_CONFIG entries for config.
	 *
	 * To quote the PCI spec:
	 *
	 * System software reads this field to determine the
	 * MSI-X Table Size N, which is encoded as N-1.
	 * For example, a returned value of "00000000011"
	 * indicates a table size of 4.
	 */
	vpci->pci_hdr.msix.ctrl = cpu_to_le16(VIRTIO_PCI_MAX_VQ + VIRTIO_PCI_MAX_CONFIG - 1);

	/* Both table and PBA are mapped to the same BAR (2) */
	vpci->pci_hdr.msix.table_offset = cpu_to_le32(2);
	vpci->pci_hdr.msix.pba_offset = cpu_to_le32(2 | PCI_IO_SIZE);
	vpci->config_vector = 0;

	if (kvm__supports_extension(kvm, KVM_CAP_SIGNAL_MSI))
		vpci->features |= VIRTIO_PCI_F_SIGNAL_MSI;

	r = device__register(&vpci->dev_hdr);
	if (r < 0)
		goto free_msix_mmio;

	/* save the IRQ that device__register() has allocated */
	vpci->legacy_irq_line = vpci->pci_hdr.irq_line;

	return 0;

free_msix_mmio:
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
free_mmio:
	kvm__deregister_mmio(kvm, vpci->mmio_addr);
free_ioport:
	ioport__unregister(kvm, vpci->port_addr);
	return r;
}

int virtio_pci__exit(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_pci *vpci = vdev->virtio;
	int i;

	kvm__deregister_mmio(kvm, vpci->mmio_addr);
	kvm__deregister_mmio(kvm, vpci->msix_io_block);
	ioport__unregister(kvm, vpci->port_addr);

	for (i = 0; i < VIRTIO_PCI_MAX_VQ; i++) {
		ioeventfd__del_event(vpci->port_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
		ioeventfd__del_event(vpci->mmio_addr + VIRTIO_PCI_QUEUE_NOTIFY, i);
	}

	return 0;
}