xref: /kvmtool/vfio/pci.c (revision e69b7663b06e8af9cc2dae16e6ec906a64c3c63d)
16078a454SJean-Philippe Brucker #include "kvm/irq.h"
26078a454SJean-Philippe Brucker #include "kvm/kvm.h"
36078a454SJean-Philippe Brucker #include "kvm/kvm-cpu.h"
46078a454SJean-Philippe Brucker #include "kvm/vfio.h"
56078a454SJean-Philippe Brucker 
6e1d0285cSAlexandru Elisei #include <assert.h>
7e1d0285cSAlexandru Elisei 
86078a454SJean-Philippe Brucker #include <sys/ioctl.h>
96078a454SJean-Philippe Brucker #include <sys/eventfd.h>
10c9888d95SJean-Philippe Brucker #include <sys/resource.h>
11c9888d95SJean-Philippe Brucker #include <sys/time.h>
126078a454SJean-Philippe Brucker 
156078a454SJean-Philippe Brucker /* Wrapper around UAPI vfio_irq_set */
16a3704b91SAndre Przywara union vfio_irq_eventfd {
176078a454SJean-Philippe Brucker 	struct vfio_irq_set	irq;
18a3704b91SAndre Przywara 	u8 buffer[sizeof(struct vfio_irq_set) + sizeof(int)];
196078a454SJean-Philippe Brucker };
206078a454SJean-Philippe Brucker 
21a3704b91SAndre Przywara static void set_vfio_irq_eventd_payload(union vfio_irq_eventfd *evfd, int fd)
22a3704b91SAndre Przywara {
23a3704b91SAndre Przywara 	memcpy(&evfd->irq.data, &fd, sizeof(fd));
24a3704b91SAndre Przywara }
25a3704b91SAndre Przywara 
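/*
 * Note on state tracking (summary of the code below): virt_state reflects
 * what the guest has written to the emulated capability, while phys_state
 * reflects what has actually been applied to the device through VFIO. The
 * enable/mask paths reconcile the two lazily.
 */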
26c9888d95SJean-Philippe Brucker #define msi_is_enabled(state)		((state) & VFIO_PCI_MSI_STATE_ENABLED)
27c9888d95SJean-Philippe Brucker #define msi_is_masked(state)		((state) & VFIO_PCI_MSI_STATE_MASKED)
28c9888d95SJean-Philippe Brucker #define msi_is_empty(state)		((state) & VFIO_PCI_MSI_STATE_EMPTY)
29c9888d95SJean-Philippe Brucker 
30c9888d95SJean-Philippe Brucker #define msi_update_state(state, val, bit)				\
31c9888d95SJean-Philippe Brucker 	(state) = (val) ? (state) | bit : (state) & ~bit;
32c9888d95SJean-Philippe Brucker #define msi_set_enabled(state, val)					\
33c9888d95SJean-Philippe Brucker 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_ENABLED)
34c9888d95SJean-Philippe Brucker #define msi_set_masked(state, val)					\
35c9888d95SJean-Philippe Brucker 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_MASKED)
36c9888d95SJean-Philippe Brucker #define msi_set_empty(state, val)					\
37c9888d95SJean-Philippe Brucker 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_EMPTY)
38c9888d95SJean-Philippe Brucker 
39c9888d95SJean-Philippe Brucker static void vfio_pci_disable_intx(struct kvm *kvm, struct vfio_device *vdev);
407302327aSLeo Yan static int vfio_pci_enable_intx(struct kvm *kvm, struct vfio_device *vdev);
41c9888d95SJean-Philippe Brucker 
428dd28afeSJean-Philippe Brucker static int vfio_pci_enable_msis(struct kvm *kvm, struct vfio_device *vdev,
438dd28afeSJean-Philippe Brucker 				bool msix)
44c9888d95SJean-Philippe Brucker {
45c9888d95SJean-Philippe Brucker 	size_t i;
46c9888d95SJean-Philippe Brucker 	int ret = 0;
47c9888d95SJean-Philippe Brucker 	int *eventfds;
48c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
498dd28afeSJean-Philippe Brucker 	struct vfio_pci_msi_common *msis = msix ? &pdev->msix : &pdev->msi;
50a3704b91SAndre Przywara 	union vfio_irq_eventfd single = {
51c9888d95SJean-Philippe Brucker 		.irq = {
52c9888d95SJean-Philippe Brucker 			.argsz	= sizeof(single),
53c9888d95SJean-Philippe Brucker 			.flags	= VFIO_IRQ_SET_DATA_EVENTFD |
54c9888d95SJean-Philippe Brucker 				  VFIO_IRQ_SET_ACTION_TRIGGER,
55c9888d95SJean-Philippe Brucker 			.index	= msis->info.index,
56c9888d95SJean-Philippe Brucker 			.count	= 1,
57c9888d95SJean-Philippe Brucker 		},
58c9888d95SJean-Philippe Brucker 	};
59c9888d95SJean-Philippe Brucker 
60c9888d95SJean-Philippe Brucker 	if (!msi_is_enabled(msis->virt_state))
61c9888d95SJean-Philippe Brucker 		return 0;
62c9888d95SJean-Philippe Brucker 
637302327aSLeo Yan 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX)
64c9888d95SJean-Philippe Brucker 		/*
65c9888d95SJean-Philippe Brucker 		 * PCI (and VFIO) forbids enabling INTx, MSI or MSIX at the same
66c9888d95SJean-Philippe Brucker 		 * time. Since INTx has to be enabled from the start (we don't
677302327aSLeo Yan 		 * have a reliable way to know when the guest starts using it),
68c9888d95SJean-Philippe Brucker 		 * disable it now.
69c9888d95SJean-Philippe Brucker 		 */
70c9888d95SJean-Philippe Brucker 		vfio_pci_disable_intx(kvm, vdev);
71c9888d95SJean-Philippe Brucker 
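	/*
	 * With VFIO_IRQ_SET_DATA_EVENTFD, the variable-length data[] array
	 * following struct vfio_irq_set holds one eventfd per vector; an fd
	 * of -1 de-assigns the corresponding trigger.
	 */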
72c9888d95SJean-Philippe Brucker 	eventfds = (void *)msis->irq_set + sizeof(struct vfio_irq_set);
73c9888d95SJean-Philippe Brucker 
74c9888d95SJean-Philippe Brucker 	/*
75c9888d95SJean-Philippe Brucker 	 * Initial registration of the full range. This enables the physical
76c9888d95SJean-Philippe Brucker 	 * MSI/MSI-X capability, which might have desired side effects. For
77c9888d95SJean-Philippe Brucker 	 * instance when assigning virtio legacy devices, enabling the MSI
78c9888d95SJean-Philippe Brucker 	 * capability modifies the config space layout!
79c9888d95SJean-Philippe Brucker 	 *
80c9888d95SJean-Philippe Brucker 	 * As an optimization, only update MSIs when the guest unmasks the
81c9888d95SJean-Philippe Brucker 	 * capability. This greatly reduces the initialization time for a
82c9888d95SJean-Philippe Brucker 	 * Linux guest with 2048+ MSIs. A Linux guest starts by enabling the
83c9888d95SJean-Philippe Brucker 	 * MSI-X cap masked, then fills individual vectors, then unmasks the
84c9888d95SJean-Philippe Brucker 	 * whole function. So we only do one VFIO ioctl when enabling for the
85c9888d95SJean-Philippe Brucker 	 * first time, and another when unmasking.
86c9888d95SJean-Philippe Brucker 	 *
87c9888d95SJean-Philippe Brucker 	 * phys_state is empty when it is enabled but no vector has been
88c9888d95SJean-Philippe Brucker 	 * registered via SET_IRQS yet.
89c9888d95SJean-Philippe Brucker 	 */
90c9888d95SJean-Philippe Brucker 	if (!msi_is_enabled(msis->phys_state) ||
91c9888d95SJean-Philippe Brucker 	    (!msi_is_masked(msis->virt_state) &&
92c9888d95SJean-Philippe Brucker 	     msi_is_empty(msis->phys_state))) {
93c9888d95SJean-Philippe Brucker 		bool empty = true;
94c9888d95SJean-Philippe Brucker 
95c9888d95SJean-Philippe Brucker 		for (i = 0; i < msis->nr_entries; i++) {
96c9888d95SJean-Philippe Brucker 			eventfds[i] = msis->entries[i].gsi >= 0 ?
97c9888d95SJean-Philippe Brucker 				      msis->entries[i].eventfd : -1;
98c9888d95SJean-Philippe Brucker 
99c9888d95SJean-Philippe Brucker 			if (eventfds[i] >= 0)
100c9888d95SJean-Philippe Brucker 				empty = false;
101c9888d95SJean-Philippe Brucker 		}
102c9888d95SJean-Philippe Brucker 
103c9888d95SJean-Philippe Brucker 		ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, msis->irq_set);
104c9888d95SJean-Philippe Brucker 		if (ret < 0) {
105c9888d95SJean-Philippe Brucker 			perror("VFIO_DEVICE_SET_IRQS(multi)");
106c9888d95SJean-Philippe Brucker 			return ret;
107c9888d95SJean-Philippe Brucker 		}
108c9888d95SJean-Philippe Brucker 
109c9888d95SJean-Philippe Brucker 		msi_set_enabled(msis->phys_state, true);
110c9888d95SJean-Philippe Brucker 		msi_set_empty(msis->phys_state, empty);
111c9888d95SJean-Philippe Brucker 
112c9888d95SJean-Philippe Brucker 		return 0;
113c9888d95SJean-Philippe Brucker 	}
114c9888d95SJean-Philippe Brucker 
115c9888d95SJean-Philippe Brucker 	if (msi_is_masked(msis->virt_state)) {
116c9888d95SJean-Philippe Brucker 		/* TODO: if phys_state is neither empty nor masked, mask all vectors */
117c9888d95SJean-Philippe Brucker 		return 0;
118c9888d95SJean-Philippe Brucker 	}
119c9888d95SJean-Philippe Brucker 
120c9888d95SJean-Philippe Brucker 	/* Update individual vectors to avoid breaking those in use */
121c9888d95SJean-Philippe Brucker 	for (i = 0; i < msis->nr_entries; i++) {
122c9888d95SJean-Philippe Brucker 		struct vfio_pci_msi_entry *entry = &msis->entries[i];
123c9888d95SJean-Philippe Brucker 		int fd = entry->gsi >= 0 ? entry->eventfd : -1;
124c9888d95SJean-Philippe Brucker 
125c9888d95SJean-Philippe Brucker 		if (fd == eventfds[i])
126c9888d95SJean-Philippe Brucker 			continue;
127c9888d95SJean-Philippe Brucker 
128c9888d95SJean-Philippe Brucker 		single.irq.start = i;
129a3704b91SAndre Przywara 		set_vfio_irq_eventd_payload(&single, fd);
130c9888d95SJean-Philippe Brucker 
131c9888d95SJean-Philippe Brucker 		ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &single);
132c9888d95SJean-Philippe Brucker 		if (ret < 0) {
133c9888d95SJean-Philippe Brucker 			perror("VFIO_DEVICE_SET_IRQS(single)");
134c9888d95SJean-Philippe Brucker 			break;
135c9888d95SJean-Philippe Brucker 		}
136c9888d95SJean-Philippe Brucker 
137c9888d95SJean-Philippe Brucker 		eventfds[i] = fd;
138c9888d95SJean-Philippe Brucker 
139c9888d95SJean-Philippe Brucker 		if (msi_is_empty(msis->phys_state) && fd >= 0)
140c9888d95SJean-Philippe Brucker 			msi_set_empty(msis->phys_state, false);
141c9888d95SJean-Philippe Brucker 	}
142c9888d95SJean-Philippe Brucker 
143c9888d95SJean-Philippe Brucker 	return ret;
144c9888d95SJean-Philippe Brucker }
145c9888d95SJean-Philippe Brucker 
1468dd28afeSJean-Philippe Brucker static int vfio_pci_disable_msis(struct kvm *kvm, struct vfio_device *vdev,
1478dd28afeSJean-Philippe Brucker 				 bool msix)
148c9888d95SJean-Philippe Brucker {
149c9888d95SJean-Philippe Brucker 	int ret;
150c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
1518dd28afeSJean-Philippe Brucker 	struct vfio_pci_msi_common *msis = msix ? &pdev->msix : &pdev->msi;
152c9888d95SJean-Philippe Brucker 	struct vfio_irq_set irq_set = {
153c9888d95SJean-Philippe Brucker 		.argsz	= sizeof(irq_set),
154c9888d95SJean-Philippe Brucker 		.flags 	= VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
155c9888d95SJean-Philippe Brucker 		.index 	= msis->info.index,
156c9888d95SJean-Philippe Brucker 		.start 	= 0,
157c9888d95SJean-Philippe Brucker 		.count	= 0,
158c9888d95SJean-Philippe Brucker 	};
159c9888d95SJean-Philippe Brucker 
160c9888d95SJean-Philippe Brucker 	if (!msi_is_enabled(msis->phys_state))
161c9888d95SJean-Philippe Brucker 		return 0;
162c9888d95SJean-Philippe Brucker 
163c9888d95SJean-Philippe Brucker 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
164c9888d95SJean-Philippe Brucker 	if (ret < 0) {
165c9888d95SJean-Philippe Brucker 		perror("VFIO_DEVICE_SET_IRQS(NONE)");
166c9888d95SJean-Philippe Brucker 		return ret;
167c9888d95SJean-Philippe Brucker 	}
168c9888d95SJean-Philippe Brucker 
169c9888d95SJean-Philippe Brucker 	msi_set_enabled(msis->phys_state, false);
170c9888d95SJean-Philippe Brucker 	msi_set_empty(msis->phys_state, true);
171c9888d95SJean-Philippe Brucker 
1727302327aSLeo Yan 	/*
1737302327aSLeo Yan 	 * When MSI or MSI-X is disabled, it might be because the PCI
1747302327aSLeo Yan 	 * driver detected an MSI interrupt failure and wants to roll
1757302327aSLeo Yan 	 * back to INTx mode. Thus re-enable INTx here, if the device
1767302327aSLeo Yan 	 * supports it.
1777302327aSLeo Yan 	 */
1787302327aSLeo Yan 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX)
1797302327aSLeo Yan 		ret = vfio_pci_enable_intx(kvm, vdev);
1807302327aSLeo Yan 
1817302327aSLeo Yan 	return ret >= 0 ? 0 : ret;
182c9888d95SJean-Philippe Brucker }
183c9888d95SJean-Philippe Brucker 
184c9888d95SJean-Philippe Brucker static int vfio_pci_update_msi_entry(struct kvm *kvm, struct vfio_device *vdev,
185c9888d95SJean-Philippe Brucker 				     struct vfio_pci_msi_entry *entry)
186c9888d95SJean-Philippe Brucker {
187c9888d95SJean-Philippe Brucker 	int ret;
188c9888d95SJean-Philippe Brucker 
189c9888d95SJean-Philippe Brucker 	if (entry->eventfd < 0) {
190c9888d95SJean-Philippe Brucker 		entry->eventfd = eventfd(0, 0);
191c9888d95SJean-Philippe Brucker 		if (entry->eventfd < 0) {
192c9888d95SJean-Philippe Brucker 			ret = -errno;
193c9888d95SJean-Philippe Brucker 			vfio_dev_err(vdev, "cannot create eventfd");
194c9888d95SJean-Philippe Brucker 			return ret;
195c9888d95SJean-Philippe Brucker 		}
196c9888d95SJean-Philippe Brucker 	}
197c9888d95SJean-Philippe Brucker 
198c9888d95SJean-Philippe Brucker 	/* Allocate IRQ if necessary */
199c9888d95SJean-Philippe Brucker 	if (entry->gsi < 0) {
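		/*
		 * The device ID passed to the IRQ routing code is the PCI
		 * devfn (device number in bits 7:3, function 0).
		 */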
200c9888d95SJean-Philippe Brucker 		int ret = irq__add_msix_route(kvm, &entry->config.msg,
201c9888d95SJean-Philippe Brucker 					      vdev->dev_hdr.dev_num << 3);
202c9888d95SJean-Philippe Brucker 		if (ret < 0) {
203c9888d95SJean-Philippe Brucker 			vfio_dev_err(vdev, "cannot create MSI-X route");
204c9888d95SJean-Philippe Brucker 			return ret;
205c9888d95SJean-Philippe Brucker 		}
206c9888d95SJean-Philippe Brucker 		entry->gsi = ret;
207c9888d95SJean-Philippe Brucker 	} else {
208c9888d95SJean-Philippe Brucker 		irq__update_msix_route(kvm, entry->gsi, &entry->config.msg);
209c9888d95SJean-Philippe Brucker 	}
210c9888d95SJean-Philippe Brucker 
211c9888d95SJean-Philippe Brucker 	/*
212c9888d95SJean-Philippe Brucker 	 * MSI masking is unimplemented in VFIO, so we have to handle it by
213c9888d95SJean-Philippe Brucker 	 * disabling/enabling the IRQ route instead. We do it on the KVM side
214c9888d95SJean-Philippe Brucker 	 * rather than VFIO, because:
215c9888d95SJean-Philippe Brucker 	 * - it is 8x faster,
216c9888d95SJean-Philippe Brucker 	 * - it allows us to decouple the masking logic from the capability state,
217c9888d95SJean-Philippe Brucker 	 * - in the masked state, after removing the irqfd route, we could easily
218c9888d95SJean-Philippe Brucker 	 *   plug the eventfd into a local handler, in order to serve Pending Bit
219c9888d95SJean-Philippe Brucker 	 *   reads to the guest.
220c9888d95SJean-Philippe Brucker 	 *
221c9888d95SJean-Philippe Brucker 	 * So entry->phys_state is masked when there is no active irqfd route.
222c9888d95SJean-Philippe Brucker 	 */
223c9888d95SJean-Philippe Brucker 	if (msi_is_masked(entry->virt_state) == msi_is_masked(entry->phys_state))
224c9888d95SJean-Philippe Brucker 		return 0;
225c9888d95SJean-Philippe Brucker 
226c9888d95SJean-Philippe Brucker 	if (msi_is_masked(entry->phys_state)) {
227c9888d95SJean-Philippe Brucker 		ret = irq__add_irqfd(kvm, entry->gsi, entry->eventfd, -1);
228c9888d95SJean-Philippe Brucker 		if (ret < 0) {
229c9888d95SJean-Philippe Brucker 			vfio_dev_err(vdev, "cannot setup irqfd");
230c9888d95SJean-Philippe Brucker 			return ret;
231c9888d95SJean-Philippe Brucker 		}
232c9888d95SJean-Philippe Brucker 	} else {
233c9888d95SJean-Philippe Brucker 		irq__del_irqfd(kvm, entry->gsi, entry->eventfd);
234c9888d95SJean-Philippe Brucker 	}
235c9888d95SJean-Philippe Brucker 
236c9888d95SJean-Philippe Brucker 	msi_set_masked(entry->phys_state, msi_is_masked(entry->virt_state));
237c9888d95SJean-Philippe Brucker 
238c9888d95SJean-Philippe Brucker 	return 0;
239c9888d95SJean-Philippe Brucker }
240c9888d95SJean-Philippe Brucker 
241c9888d95SJean-Philippe Brucker static void vfio_pci_msix_pba_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
242c9888d95SJean-Philippe Brucker 				     u32 len, u8 is_write, void *ptr)
243c9888d95SJean-Philippe Brucker {
244c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = ptr;
245c9888d95SJean-Philippe Brucker 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
246c9888d95SJean-Philippe Brucker 	u64 offset = addr - pba->guest_phys_addr;
247c9888d95SJean-Philippe Brucker 	struct vfio_device *vdev = container_of(pdev, struct vfio_device, pci);
248c9888d95SJean-Philippe Brucker 
249c9888d95SJean-Philippe Brucker 	if (is_write)
250c9888d95SJean-Philippe Brucker 		return;
251c9888d95SJean-Philippe Brucker 
252c9888d95SJean-Philippe Brucker 	/*
253c9888d95SJean-Philippe Brucker 	 * TODO: emulate PBA. Hardware MSI-X is never masked, so reading the PBA
254c9888d95SJean-Philippe Brucker 	 * is completely useless here. Note that Linux doesn't use PBA.
255c9888d95SJean-Philippe Brucker 	 */
256c9888d95SJean-Philippe Brucker 	if (pread(vdev->fd, data, len, pba->offset + offset) != (ssize_t)len)
257c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "cannot access MSIX PBA\n");
258c9888d95SJean-Philippe Brucker }
259c9888d95SJean-Philippe Brucker 
260c9888d95SJean-Philippe Brucker static void vfio_pci_msix_table_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
261c9888d95SJean-Philippe Brucker 				       u32 len, u8 is_write, void *ptr)
262c9888d95SJean-Philippe Brucker {
263c9888d95SJean-Philippe Brucker 	struct kvm *kvm = vcpu->kvm;
264c9888d95SJean-Philippe Brucker 	struct vfio_pci_msi_entry *entry;
265c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = ptr;
266c9888d95SJean-Philippe Brucker 	struct vfio_device *vdev = container_of(pdev, struct vfio_device, pci);
267c9888d95SJean-Philippe Brucker 
268c9888d95SJean-Philippe Brucker 	u64 offset = addr - pdev->msix_table.guest_phys_addr;
269c9888d95SJean-Philippe Brucker 
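	/* Each table entry is 16 bytes: Address (8), Data (4), Vector Control (4). */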
270c9888d95SJean-Philippe Brucker 	size_t vector = offset / PCI_MSIX_ENTRY_SIZE;
271c9888d95SJean-Philippe Brucker 	off_t field = offset % PCI_MSIX_ENTRY_SIZE;
272c9888d95SJean-Philippe Brucker 
273c9888d95SJean-Philippe Brucker 	/*
274c9888d95SJean-Philippe Brucker 	 * The PCI spec says that software must use aligned 4 or 8 byte
275c9888d95SJean-Philippe Brucker 	 * accesses for the MSI-X Table.
276c9888d95SJean-Philippe Brucker 	 */
277c9888d95SJean-Philippe Brucker 	if ((len != 4 && len != 8) || addr & (len - 1)) {
278c9888d95SJean-Philippe Brucker 		vfio_dev_warn(vdev, "invalid MSI-X table access");
279c9888d95SJean-Philippe Brucker 		return;
280c9888d95SJean-Philippe Brucker 	}
281c9888d95SJean-Philippe Brucker 
282c9888d95SJean-Philippe Brucker 	entry = &pdev->msix.entries[vector];
283c9888d95SJean-Philippe Brucker 
284c9888d95SJean-Philippe Brucker 	mutex_lock(&pdev->msix.mutex);
285c9888d95SJean-Philippe Brucker 
286c9888d95SJean-Philippe Brucker 	if (!is_write) {
287c9888d95SJean-Philippe Brucker 		memcpy(data, (void *)&entry->config + field, len);
288c9888d95SJean-Philippe Brucker 		goto out_unlock;
289c9888d95SJean-Philippe Brucker 	}
290c9888d95SJean-Philippe Brucker 
291c9888d95SJean-Philippe Brucker 	memcpy((void *)&entry->config + field, data, len);
292c9888d95SJean-Philippe Brucker 
293c9888d95SJean-Philippe Brucker 	/*
294c9888d95SJean-Philippe Brucker 	 * Check if access touched the vector control register, which is at the
295c9888d95SJean-Philippe Brucker 	 * end of the MSI-X entry.
296c9888d95SJean-Philippe Brucker 	 */
297c9888d95SJean-Philippe Brucker 	if (field + len <= PCI_MSIX_ENTRY_VECTOR_CTRL)
298c9888d95SJean-Philippe Brucker 		goto out_unlock;
299c9888d95SJean-Philippe Brucker 
300c9888d95SJean-Philippe Brucker 	msi_set_masked(entry->virt_state, entry->config.ctrl &
301c9888d95SJean-Philippe Brucker 		       PCI_MSIX_ENTRY_CTRL_MASKBIT);
302c9888d95SJean-Philippe Brucker 
303c9888d95SJean-Philippe Brucker 	if (vfio_pci_update_msi_entry(kvm, vdev, entry) < 0)
304c9888d95SJean-Philippe Brucker 		/* Not much we can do here. */
305c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to configure MSIX vector %zu", vector);
306c9888d95SJean-Philippe Brucker 
307c9888d95SJean-Philippe Brucker 	/* Update the physical capability if necessary */
3088dd28afeSJean-Philippe Brucker 	if (vfio_pci_enable_msis(kvm, vdev, true))
309c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "cannot enable MSIX");
310c9888d95SJean-Philippe Brucker 
311c9888d95SJean-Philippe Brucker out_unlock:
312c9888d95SJean-Philippe Brucker 	mutex_unlock(&pdev->msix.mutex);
313c9888d95SJean-Philippe Brucker }
314c9888d95SJean-Philippe Brucker 
315c9888d95SJean-Philippe Brucker static void vfio_pci_msix_cap_write(struct kvm *kvm,
316*e69b7663SAlexandru Elisei 				    struct vfio_device *vdev, u16 off,
317c9888d95SJean-Philippe Brucker 				    void *data, int sz)
318c9888d95SJean-Philippe Brucker {
319c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
320c9888d95SJean-Philippe Brucker 	off_t enable_pos = PCI_MSIX_FLAGS + 1;
321c9888d95SJean-Philippe Brucker 	bool enable;
322c9888d95SJean-Philippe Brucker 	u16 flags;
323c9888d95SJean-Philippe Brucker 
324c9888d95SJean-Philippe Brucker 	off -= pdev->msix.pos;
325c9888d95SJean-Philippe Brucker 
326c9888d95SJean-Philippe Brucker 	/* Check if access intersects with the MSI-X Enable bit */
327c9888d95SJean-Philippe Brucker 	if (off > enable_pos || off + sz <= enable_pos)
328c9888d95SJean-Philippe Brucker 		return;
329c9888d95SJean-Philippe Brucker 
330c9888d95SJean-Philippe Brucker 	/* Read byte that contains the Enable bit */
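	/*
	 * The byte sits in the upper half of Message Control, so shift it
	 * into bits 15:8 to line up with the PCI_MSIX_FLAGS_* masks below.
	 */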
331c9888d95SJean-Philippe Brucker 	flags = *(u8 *)(data + enable_pos - off) << 8;
332c9888d95SJean-Philippe Brucker 
333c9888d95SJean-Philippe Brucker 	mutex_lock(&pdev->msix.mutex);
334c9888d95SJean-Philippe Brucker 
335c9888d95SJean-Philippe Brucker 	msi_set_masked(pdev->msix.virt_state, flags & PCI_MSIX_FLAGS_MASKALL);
336c9888d95SJean-Philippe Brucker 	enable = flags & PCI_MSIX_FLAGS_ENABLE;
337c9888d95SJean-Philippe Brucker 	msi_set_enabled(pdev->msix.virt_state, enable);
338c9888d95SJean-Philippe Brucker 
3398dd28afeSJean-Philippe Brucker 	if (enable && vfio_pci_enable_msis(kvm, vdev, true))
340c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "cannot enable MSIX");
3418dd28afeSJean-Philippe Brucker 	else if (!enable && vfio_pci_disable_msis(kvm, vdev, true))
342c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "cannot disable MSIX");
343c9888d95SJean-Philippe Brucker 
344c9888d95SJean-Philippe Brucker 	mutex_unlock(&pdev->msix.mutex);
345c9888d95SJean-Philippe Brucker }
346c9888d95SJean-Philippe Brucker 
3478dd28afeSJean-Philippe Brucker static int vfio_pci_msi_vector_write(struct kvm *kvm, struct vfio_device *vdev,
348*e69b7663SAlexandru Elisei 				     u16 off, u8 *data, u32 sz)
3498dd28afeSJean-Philippe Brucker {
3508dd28afeSJean-Philippe Brucker 	size_t i;
3518dd28afeSJean-Philippe Brucker 	u32 mask = 0;
3528dd28afeSJean-Philippe Brucker 	size_t mask_pos, start, limit;
3538dd28afeSJean-Philippe Brucker 	struct vfio_pci_msi_entry *entry;
3548dd28afeSJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
3558dd28afeSJean-Philippe Brucker 	struct msi_cap_64 *msi_cap_64 = PCI_CAP(&pdev->hdr, pdev->msi.pos);
3568dd28afeSJean-Philippe Brucker 
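	/* Without per-vector masking there is no Mask register to intercept. */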
3578dd28afeSJean-Philippe Brucker 	if (!(msi_cap_64->ctrl & PCI_MSI_FLAGS_MASKBIT))
3588dd28afeSJean-Philippe Brucker 		return 0;
3598dd28afeSJean-Philippe Brucker 
3608dd28afeSJean-Philippe Brucker 	if (msi_cap_64->ctrl & PCI_MSI_FLAGS_64BIT)
3618dd28afeSJean-Philippe Brucker 		mask_pos = PCI_MSI_MASK_64;
3628dd28afeSJean-Philippe Brucker 	else
3638dd28afeSJean-Philippe Brucker 		mask_pos = PCI_MSI_MASK_32;
3648dd28afeSJean-Philippe Brucker 
3658dd28afeSJean-Philippe Brucker 	if (off >= mask_pos + 4 || off + sz <= mask_pos)
3668dd28afeSJean-Philippe Brucker 		return 0;
3678dd28afeSJean-Philippe Brucker 
3688dd28afeSJean-Philippe Brucker 	/* Set mask to current state */
3698dd28afeSJean-Philippe Brucker 	for (i = 0; i < pdev->msi.nr_entries; i++) {
3708dd28afeSJean-Philippe Brucker 		entry = &pdev->msi.entries[i];
3718dd28afeSJean-Philippe Brucker 		mask |= !!msi_is_masked(entry->virt_state) << i;
3728dd28afeSJean-Philippe Brucker 	}
3738dd28afeSJean-Philippe Brucker 
3748dd28afeSJean-Philippe Brucker 	/* Update mask following the intersection of access and register */
3758dd28afeSJean-Philippe Brucker 	/* Update mask with the bytes where the access and the register intersect */
3768dd28afeSJean-Philippe Brucker 	limit = min_t(size_t, off + sz, mask_pos + 4);
3778dd28afeSJean-Philippe Brucker 
3788dd28afeSJean-Philippe Brucker 	memcpy((void *)&mask + start - mask_pos, data + start - off,
3798dd28afeSJean-Philippe Brucker 	       limit - start);
3808dd28afeSJean-Philippe Brucker 
3818dd28afeSJean-Philippe Brucker 	/* Update states if necessary */
3828dd28afeSJean-Philippe Brucker 	for (i = 0; i < pdev->msi.nr_entries; i++) {
3838dd28afeSJean-Philippe Brucker 		bool masked = mask & (1 << i);
3848dd28afeSJean-Philippe Brucker 
3858dd28afeSJean-Philippe Brucker 		entry = &pdev->msi.entries[i];
3868dd28afeSJean-Philippe Brucker 		if (masked != msi_is_masked(entry->virt_state)) {
3878dd28afeSJean-Philippe Brucker 			msi_set_masked(entry->virt_state, masked);
3888dd28afeSJean-Philippe Brucker 			vfio_pci_update_msi_entry(kvm, vdev, entry);
3898dd28afeSJean-Philippe Brucker 		}
3908dd28afeSJean-Philippe Brucker 	}
3918dd28afeSJean-Philippe Brucker 
3928dd28afeSJean-Philippe Brucker 	return 1;
3938dd28afeSJean-Philippe Brucker }
3948dd28afeSJean-Philippe Brucker 
3958dd28afeSJean-Philippe Brucker static void vfio_pci_msi_cap_write(struct kvm *kvm, struct vfio_device *vdev,
396*e69b7663SAlexandru Elisei 				   u16 off, u8 *data, u32 sz)
3978dd28afeSJean-Philippe Brucker {
3988dd28afeSJean-Philippe Brucker 	u8 ctrl;
3998dd28afeSJean-Philippe Brucker 	struct msi_msg msg;
4008dd28afeSJean-Philippe Brucker 	size_t i, nr_vectors;
4018dd28afeSJean-Philippe Brucker 	struct vfio_pci_msi_entry *entry;
4028dd28afeSJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
4038dd28afeSJean-Philippe Brucker 	struct msi_cap_64 *msi_cap_64 = PCI_CAP(&pdev->hdr, pdev->msi.pos);
4048dd28afeSJean-Philippe Brucker 
4058dd28afeSJean-Philippe Brucker 	off -= pdev->msi.pos;
4068dd28afeSJean-Philippe Brucker 
4078dd28afeSJean-Philippe Brucker 	mutex_lock(&pdev->msi.mutex);
4088dd28afeSJean-Philippe Brucker 
4098dd28afeSJean-Philippe Brucker 	/* Check if the guest is trying to update mask bits */
4108dd28afeSJean-Philippe Brucker 	if (vfio_pci_msi_vector_write(kvm, vdev, off, data, sz))
4118dd28afeSJean-Philippe Brucker 		goto out_unlock;
4128dd28afeSJean-Philippe Brucker 
4138dd28afeSJean-Philippe Brucker 	/* Only modify routes when guest pokes the enable bit */
4148dd28afeSJean-Philippe Brucker 	if (off > PCI_MSI_FLAGS || off + sz <= PCI_MSI_FLAGS)
4158dd28afeSJean-Philippe Brucker 		goto out_unlock;
4168dd28afeSJean-Philippe Brucker 
4178dd28afeSJean-Philippe Brucker 	ctrl = *(u8 *)(data + PCI_MSI_FLAGS - off);
4188dd28afeSJean-Philippe Brucker 
4198dd28afeSJean-Philippe Brucker 	msi_set_enabled(pdev->msi.virt_state, ctrl & PCI_MSI_FLAGS_ENABLE);
4208dd28afeSJean-Philippe Brucker 
4218dd28afeSJean-Philippe Brucker 	if (!msi_is_enabled(pdev->msi.virt_state)) {
4228dd28afeSJean-Philippe Brucker 		vfio_pci_disable_msis(kvm, vdev, false);
4238dd28afeSJean-Philippe Brucker 		goto out_unlock;
4248dd28afeSJean-Philippe Brucker 	}
4258dd28afeSJean-Philippe Brucker 
4268dd28afeSJean-Philippe Brucker 	/* Create routes for the requested vectors */
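	/*
	 * The Multiple Message Enable field (bits 6:4 of Message Control)
	 * encodes log2 of the number of vectors enabled by the guest.
	 */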
4278dd28afeSJean-Philippe Brucker 	nr_vectors = 1 << ((ctrl & PCI_MSI_FLAGS_QSIZE) >> 4);
4288dd28afeSJean-Philippe Brucker 
4298dd28afeSJean-Philippe Brucker 	msg.address_lo = msi_cap_64->address_lo;
4308dd28afeSJean-Philippe Brucker 	if (msi_cap_64->ctrl & PCI_MSI_FLAGS_64BIT) {
4318dd28afeSJean-Philippe Brucker 		msg.address_hi = msi_cap_64->address_hi;
4328dd28afeSJean-Philippe Brucker 		msg.data = msi_cap_64->data;
4338dd28afeSJean-Philippe Brucker 	} else {
4348dd28afeSJean-Philippe Brucker 		struct msi_cap_32 *msi_cap_32 = (void *)msi_cap_64;
4358dd28afeSJean-Philippe Brucker 		msg.address_hi = 0;
4368dd28afeSJean-Philippe Brucker 		msg.data = msi_cap_32->data;
4378dd28afeSJean-Philippe Brucker 	}
4388dd28afeSJean-Philippe Brucker 
4398dd28afeSJean-Philippe Brucker 	for (i = 0; i < nr_vectors; i++) {
4408dd28afeSJean-Philippe Brucker 		entry = &pdev->msi.entries[i];
441e554aefdSLorenzo Pieralisi 
442e554aefdSLorenzo Pieralisi 		/*
443e554aefdSLorenzo Pieralisi 		 * Set the MSI data value as required by the PCI local
444e554aefdSLorenzo Pieralisi 		 * bus specifications, MSI capability, "Message Data".
445e554aefdSLorenzo Pieralisi 		 */
446e554aefdSLorenzo Pieralisi 		msg.data &= ~(nr_vectors - 1);
447e554aefdSLorenzo Pieralisi 		msg.data |= i;
448e554aefdSLorenzo Pieralisi 
4498dd28afeSJean-Philippe Brucker 		entry->config.msg = msg;
4508dd28afeSJean-Philippe Brucker 		vfio_pci_update_msi_entry(kvm, vdev, entry);
4518dd28afeSJean-Philippe Brucker 	}
4528dd28afeSJean-Philippe Brucker 
4538dd28afeSJean-Philippe Brucker 	/* Update the physical capability if necessary */
4548dd28afeSJean-Philippe Brucker 	if (vfio_pci_enable_msis(kvm, vdev, false))
4558dd28afeSJean-Philippe Brucker 		vfio_dev_err(vdev, "cannot enable MSI");
4568dd28afeSJean-Philippe Brucker 
4578dd28afeSJean-Philippe Brucker out_unlock:
4588dd28afeSJean-Philippe Brucker 	mutex_unlock(&pdev->msi.mutex);
4598dd28afeSJean-Philippe Brucker }
4608dd28afeSJean-Philippe Brucker 
4615a8e4f25SAlexandru Elisei static int vfio_pci_bar_activate(struct kvm *kvm,
4625a8e4f25SAlexandru Elisei 				 struct pci_device_header *pci_hdr,
4635a8e4f25SAlexandru Elisei 				 int bar_num, void *data)
4645a8e4f25SAlexandru Elisei {
4655a8e4f25SAlexandru Elisei 	struct vfio_device *vdev = data;
4665a8e4f25SAlexandru Elisei 	struct vfio_pci_device *pdev = &vdev->pci;
4675a8e4f25SAlexandru Elisei 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
4685a8e4f25SAlexandru Elisei 	struct vfio_pci_msix_table *table = &pdev->msix_table;
4695a8e4f25SAlexandru Elisei 	struct vfio_region *region;
470465edc9dSAlexandru Elisei 	u32 bar_addr;
4715a8e4f25SAlexandru Elisei 	bool has_msix;
4725a8e4f25SAlexandru Elisei 	int ret;
4735a8e4f25SAlexandru Elisei 
4745a8e4f25SAlexandru Elisei 	assert((u32)bar_num < vdev->info.num_regions);
4755a8e4f25SAlexandru Elisei 
4765a8e4f25SAlexandru Elisei 	region = &vdev->regions[bar_num];
4775a8e4f25SAlexandru Elisei 	has_msix = pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX;
4785a8e4f25SAlexandru Elisei 
479465edc9dSAlexandru Elisei 	bar_addr = pci__bar_address(pci_hdr, bar_num);
480465edc9dSAlexandru Elisei 	if (pci__bar_is_io(pci_hdr, bar_num))
481465edc9dSAlexandru Elisei 		region->port_base = bar_addr;
482465edc9dSAlexandru Elisei 	else
483465edc9dSAlexandru Elisei 		region->guest_phys_addr = bar_addr;
484465edc9dSAlexandru Elisei 
4855a8e4f25SAlexandru Elisei 	if (has_msix && (u32)bar_num == table->bar) {
486465edc9dSAlexandru Elisei 		table->guest_phys_addr = region->guest_phys_addr;
4875a8e4f25SAlexandru Elisei 		ret = kvm__register_mmio(kvm, table->guest_phys_addr,
4885a8e4f25SAlexandru Elisei 					 table->size, false,
4895a8e4f25SAlexandru Elisei 					 vfio_pci_msix_table_access, pdev);
4905a8e4f25SAlexandru Elisei 		/*
4915a8e4f25SAlexandru Elisei 		 * The MSIX table and the PBA structure can share the same BAR,
4925a8e4f25SAlexandru Elisei 		 * but for convenience we register separate regions for MMIO
4935a8e4f25SAlexandru Elisei 		 * emulation. We want to update both if they share the same
4945a8e4f25SAlexandru Elisei 		 * BAR.
4955a8e4f25SAlexandru Elisei 		 */
4965a8e4f25SAlexandru Elisei 		if (ret < 0 || table->bar != pba->bar)
4975a8e4f25SAlexandru Elisei 			goto out;
4985a8e4f25SAlexandru Elisei 	}
4995a8e4f25SAlexandru Elisei 
5005a8e4f25SAlexandru Elisei 	if (has_msix && (u32)bar_num == pba->bar) {
501465edc9dSAlexandru Elisei 		if (pba->bar == table->bar)
502465edc9dSAlexandru Elisei 			pba->guest_phys_addr = table->guest_phys_addr + table->size;
503465edc9dSAlexandru Elisei 		else
504465edc9dSAlexandru Elisei 			pba->guest_phys_addr = region->guest_phys_addr;
5055a8e4f25SAlexandru Elisei 		ret = kvm__register_mmio(kvm, pba->guest_phys_addr,
5065a8e4f25SAlexandru Elisei 					 pba->size, false,
5075a8e4f25SAlexandru Elisei 					 vfio_pci_msix_pba_access, pdev);
5085a8e4f25SAlexandru Elisei 		goto out;
5095a8e4f25SAlexandru Elisei 	}
5105a8e4f25SAlexandru Elisei 
5115a8e4f25SAlexandru Elisei 	ret = vfio_map_region(kvm, vdev, region);
5125a8e4f25SAlexandru Elisei out:
5135a8e4f25SAlexandru Elisei 	return ret;
5145a8e4f25SAlexandru Elisei }
5155a8e4f25SAlexandru Elisei 
5165a8e4f25SAlexandru Elisei static int vfio_pci_bar_deactivate(struct kvm *kvm,
5175a8e4f25SAlexandru Elisei 				   struct pci_device_header *pci_hdr,
5185a8e4f25SAlexandru Elisei 				   int bar_num, void *data)
5195a8e4f25SAlexandru Elisei {
5205a8e4f25SAlexandru Elisei 	struct vfio_device *vdev = data;
5215a8e4f25SAlexandru Elisei 	struct vfio_pci_device *pdev = &vdev->pci;
5225a8e4f25SAlexandru Elisei 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
5235a8e4f25SAlexandru Elisei 	struct vfio_pci_msix_table *table = &pdev->msix_table;
5245a8e4f25SAlexandru Elisei 	struct vfio_region *region;
5255a8e4f25SAlexandru Elisei 	bool has_msix, success;
5265a8e4f25SAlexandru Elisei 	int ret;
5275a8e4f25SAlexandru Elisei 
5285a8e4f25SAlexandru Elisei 	assert((u32)bar_num < vdev->info.num_regions);
5295a8e4f25SAlexandru Elisei 
5305a8e4f25SAlexandru Elisei 	region = &vdev->regions[bar_num];
5315a8e4f25SAlexandru Elisei 	has_msix = pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX;
5325a8e4f25SAlexandru Elisei 
5335a8e4f25SAlexandru Elisei 	if (has_msix && (u32)bar_num == table->bar) {
5345a8e4f25SAlexandru Elisei 		success = kvm__deregister_mmio(kvm, table->guest_phys_addr);
5355a8e4f25SAlexandru Elisei 		/* kvm__deregister_mmio fails when the region is not found. */
5365a8e4f25SAlexandru Elisei 		ret = (success ? 0 : -ENOENT);
5375a8e4f25SAlexandru Elisei 		/* See vfio_pci_bar_activate(). */
5385a8e4f25SAlexandru Elisei 		if (ret < 0 || table->bar != pba->bar)
5395a8e4f25SAlexandru Elisei 			goto out;
5405a8e4f25SAlexandru Elisei 	}
5415a8e4f25SAlexandru Elisei 
5425a8e4f25SAlexandru Elisei 	if (has_msix && (u32)bar_num == pba->bar) {
5435a8e4f25SAlexandru Elisei 		success = kvm__deregister_mmio(kvm, pba->guest_phys_addr);
5445a8e4f25SAlexandru Elisei 		ret = (success ? 0 : -ENOENT);
5455a8e4f25SAlexandru Elisei 		goto out;
5465a8e4f25SAlexandru Elisei 	}
5475a8e4f25SAlexandru Elisei 
5485a8e4f25SAlexandru Elisei 	vfio_unmap_region(kvm, region);
5495a8e4f25SAlexandru Elisei 	ret = 0;
5505a8e4f25SAlexandru Elisei 
5515a8e4f25SAlexandru Elisei out:
5525a8e4f25SAlexandru Elisei 	return ret;
5535a8e4f25SAlexandru Elisei }
5545a8e4f25SAlexandru Elisei 
5556078a454SJean-Philippe Brucker static void vfio_pci_cfg_read(struct kvm *kvm, struct pci_device_header *pci_hdr,
556*e69b7663SAlexandru Elisei 			      u16 offset, void *data, int sz)
5576078a454SJean-Philippe Brucker {
5586078a454SJean-Philippe Brucker 	struct vfio_region_info *info;
5596078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev;
5606078a454SJean-Philippe Brucker 	struct vfio_device *vdev;
5616078a454SJean-Philippe Brucker 	char base[sz];
5626078a454SJean-Philippe Brucker 
5636078a454SJean-Philippe Brucker 	pdev = container_of(pci_hdr, struct vfio_pci_device, hdr);
5646078a454SJean-Philippe Brucker 	vdev = container_of(pdev, struct vfio_device, pci);
5656078a454SJean-Philippe Brucker 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
5666078a454SJean-Philippe Brucker 
5676078a454SJean-Philippe Brucker 	/* Dummy read in case of side-effects */
5686078a454SJean-Philippe Brucker 	if (pread(vdev->fd, base, sz, info->offset + offset) != sz)
5696078a454SJean-Philippe Brucker 		vfio_dev_warn(vdev, "failed to read %d bytes from Configuration Space at 0x%x",
5706078a454SJean-Philippe Brucker 			      sz, offset);
5716078a454SJean-Philippe Brucker }
5726078a454SJean-Philippe Brucker 
5736078a454SJean-Philippe Brucker static void vfio_pci_cfg_write(struct kvm *kvm, struct pci_device_header *pci_hdr,
574*e69b7663SAlexandru Elisei 			       u16 offset, void *data, int sz)
5756078a454SJean-Philippe Brucker {
5766078a454SJean-Philippe Brucker 	struct vfio_region_info *info;
5776078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev;
5786078a454SJean-Philippe Brucker 	struct vfio_device *vdev;
579e1d0285cSAlexandru Elisei 	u32 tmp;
580e1d0285cSAlexandru Elisei 
581e1d0285cSAlexandru Elisei 	/* Make sure a larger size will not overrun tmp on the stack. */
582e1d0285cSAlexandru Elisei 	assert(sz <= 4);
5836078a454SJean-Philippe Brucker 
5845b7fef16SAlexandru Elisei 	if (offset == PCI_ROM_ADDRESS)
5855b7fef16SAlexandru Elisei 		return;
5865b7fef16SAlexandru Elisei 
5876078a454SJean-Philippe Brucker 	pdev = container_of(pci_hdr, struct vfio_pci_device, hdr);
5886078a454SJean-Philippe Brucker 	vdev = container_of(pdev, struct vfio_device, pci);
5896078a454SJean-Philippe Brucker 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
5906078a454SJean-Philippe Brucker 
5916078a454SJean-Philippe Brucker 	if (pwrite(vdev->fd, data, sz, info->offset + offset) != sz)
5926078a454SJean-Philippe Brucker 		vfio_dev_warn(vdev, "Failed to write %d bytes to Configuration Space at 0x%x",
5936078a454SJean-Philippe Brucker 			      sz, offset);
5946078a454SJean-Philippe Brucker 
595c9888d95SJean-Philippe Brucker 	/* Handle MSI write now, since it might update the hardware capability */
596c9888d95SJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX)
597c9888d95SJean-Philippe Brucker 		vfio_pci_msix_cap_write(kvm, vdev, offset, data, sz);
598c9888d95SJean-Philippe Brucker 
5998dd28afeSJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI)
6008dd28afeSJean-Philippe Brucker 		vfio_pci_msi_cap_write(kvm, vdev, offset, data, sz);
6018dd28afeSJean-Philippe Brucker 
602e1d0285cSAlexandru Elisei 	if (pread(vdev->fd, &tmp, sz, info->offset + offset) != sz)
6036078a454SJean-Philippe Brucker 		vfio_dev_warn(vdev, "Failed to read %d bytes from Configuration Space at 0x%x",
6046078a454SJean-Philippe Brucker 			      sz, offset);
6056078a454SJean-Philippe Brucker }
6066078a454SJean-Philippe Brucker 
6078dd28afeSJean-Philippe Brucker static ssize_t vfio_pci_msi_cap_size(struct msi_cap_64 *cap_hdr)
6088dd28afeSJean-Philippe Brucker {
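	/*
	 * Size breakdown, per the PCI MSI capability layout: 10 bytes for
	 * Cap ID, Next, Message Control, the 32-bit Message Address and
	 * Message Data; 4 more for the upper address dword when 64-bit
	 * addressing is supported; 10 more (the Mask and Pending registers
	 * plus padding) when per-vector masking is supported.
	 */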
6098dd28afeSJean-Philippe Brucker 	size_t size = 10;
6108dd28afeSJean-Philippe Brucker 
6118dd28afeSJean-Philippe Brucker 	if (cap_hdr->ctrl & PCI_MSI_FLAGS_64BIT)
6128dd28afeSJean-Philippe Brucker 		size += 4;
6138dd28afeSJean-Philippe Brucker 	if (cap_hdr->ctrl & PCI_MSI_FLAGS_MASKBIT)
6148dd28afeSJean-Philippe Brucker 		size += 10;
6158dd28afeSJean-Philippe Brucker 
6168dd28afeSJean-Philippe Brucker 	return size;
6178dd28afeSJean-Philippe Brucker }
6188dd28afeSJean-Philippe Brucker 
619c9888d95SJean-Philippe Brucker static ssize_t vfio_pci_cap_size(struct pci_cap_hdr *cap_hdr)
620c9888d95SJean-Philippe Brucker {
621c9888d95SJean-Philippe Brucker 	switch (cap_hdr->type) {
622c9888d95SJean-Philippe Brucker 	case PCI_CAP_ID_MSIX:
623c9888d95SJean-Philippe Brucker 		return PCI_CAP_MSIX_SIZEOF;
6248dd28afeSJean-Philippe Brucker 	case PCI_CAP_ID_MSI:
6258dd28afeSJean-Philippe Brucker 		return vfio_pci_msi_cap_size((void *)cap_hdr);
626c9888d95SJean-Philippe Brucker 	default:
627c9888d95SJean-Philippe Brucker 		pr_err("unknown PCI capability 0x%x", cap_hdr->type);
628c9888d95SJean-Philippe Brucker 		return 0;
629c9888d95SJean-Philippe Brucker 	}
630c9888d95SJean-Philippe Brucker }
631c9888d95SJean-Philippe Brucker 
632c9888d95SJean-Philippe Brucker static int vfio_pci_add_cap(struct vfio_device *vdev, u8 *virt_hdr,
633c9888d95SJean-Philippe Brucker 			    struct pci_cap_hdr *cap, off_t pos)
634c9888d95SJean-Philippe Brucker {
635c9888d95SJean-Philippe Brucker 	struct pci_cap_hdr *last;
636c9888d95SJean-Philippe Brucker 	struct pci_device_header *hdr = &vdev->pci.hdr;
637c9888d95SJean-Philippe Brucker 
638c9888d95SJean-Philippe Brucker 	cap->next = 0;
639c9888d95SJean-Philippe Brucker 
640c9888d95SJean-Philippe Brucker 	if (!hdr->capabilities) {
641c9888d95SJean-Philippe Brucker 		hdr->capabilities = pos;
642c9888d95SJean-Philippe Brucker 		hdr->status |= PCI_STATUS_CAP_LIST;
643c9888d95SJean-Philippe Brucker 	} else {
644c9888d95SJean-Philippe Brucker 		last = PCI_CAP(virt_hdr, hdr->capabilities);
645c9888d95SJean-Philippe Brucker 
646c9888d95SJean-Philippe Brucker 		while (last->next)
647c9888d95SJean-Philippe Brucker 			last = PCI_CAP(virt_hdr, last->next);
648c9888d95SJean-Philippe Brucker 
649c9888d95SJean-Philippe Brucker 		last->next = pos;
650c9888d95SJean-Philippe Brucker 	}
651c9888d95SJean-Philippe Brucker 
652c9888d95SJean-Philippe Brucker 	memcpy(virt_hdr + pos, cap, vfio_pci_cap_size(cap));
653c9888d95SJean-Philippe Brucker 
654c9888d95SJean-Philippe Brucker 	return 0;
655c9888d95SJean-Philippe Brucker }
656c9888d95SJean-Philippe Brucker 
6576078a454SJean-Philippe Brucker static int vfio_pci_parse_caps(struct vfio_device *vdev)
6586078a454SJean-Philippe Brucker {
659c9888d95SJean-Philippe Brucker 	int ret;
660c9888d95SJean-Philippe Brucker 	size_t size;
661*e69b7663SAlexandru Elisei 	u16 pos, next;
662c9888d95SJean-Philippe Brucker 	struct pci_cap_hdr *cap;
663*e69b7663SAlexandru Elisei 	u8 virt_hdr[PCI_DEV_CFG_SIZE_LEGACY];
6646078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
6656078a454SJean-Philippe Brucker 
6666078a454SJean-Philippe Brucker 	if (!(pdev->hdr.status & PCI_STATUS_CAP_LIST))
6676078a454SJean-Philippe Brucker 		return 0;
6686078a454SJean-Philippe Brucker 
669*e69b7663SAlexandru Elisei 	memset(virt_hdr, 0, PCI_DEV_CFG_SIZE_LEGACY);
670c9888d95SJean-Philippe Brucker 
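	/* Mask off the two reserved bottom bits of the Capabilities Pointer. */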
671c9888d95SJean-Philippe Brucker 	pos = pdev->hdr.capabilities & ~3;
672c9888d95SJean-Philippe Brucker 
6736078a454SJean-Philippe Brucker 	pdev->hdr.status &= ~PCI_STATUS_CAP_LIST;
6746078a454SJean-Philippe Brucker 	pdev->hdr.capabilities = 0;
6756078a454SJean-Philippe Brucker 
676c9888d95SJean-Philippe Brucker 	for (; pos; pos = next) {
677c9888d95SJean-Philippe Brucker 		cap = PCI_CAP(&pdev->hdr, pos);
678c9888d95SJean-Philippe Brucker 		next = cap->next;
679c9888d95SJean-Philippe Brucker 
680c9888d95SJean-Philippe Brucker 		switch (cap->type) {
681c9888d95SJean-Philippe Brucker 		case PCI_CAP_ID_MSIX:
682c9888d95SJean-Philippe Brucker 			ret = vfio_pci_add_cap(vdev, virt_hdr, cap, pos);
683c9888d95SJean-Philippe Brucker 			if (ret)
684c9888d95SJean-Philippe Brucker 				return ret;
685c9888d95SJean-Philippe Brucker 
686c9888d95SJean-Philippe Brucker 			pdev->msix.pos = pos;
687c9888d95SJean-Philippe Brucker 			pdev->irq_modes |= VFIO_PCI_IRQ_MODE_MSIX;
688c9888d95SJean-Philippe Brucker 			break;
6898dd28afeSJean-Philippe Brucker 		case PCI_CAP_ID_MSI:
6908dd28afeSJean-Philippe Brucker 			ret = vfio_pci_add_cap(vdev, virt_hdr, cap, pos);
6918dd28afeSJean-Philippe Brucker 			if (ret)
6928dd28afeSJean-Philippe Brucker 				return ret;
6938dd28afeSJean-Philippe Brucker 
6948dd28afeSJean-Philippe Brucker 			pdev->msi.pos = pos;
6958dd28afeSJean-Philippe Brucker 			pdev->irq_modes |= VFIO_PCI_IRQ_MODE_MSI;
6968dd28afeSJean-Philippe Brucker 			break;
697c9888d95SJean-Philippe Brucker 		}
698c9888d95SJean-Philippe Brucker 	}
699c9888d95SJean-Philippe Brucker 
700c9888d95SJean-Philippe Brucker 	/* Wipe remaining capabilities */
701c9888d95SJean-Philippe Brucker 	pos = PCI_STD_HEADER_SIZEOF;
702*e69b7663SAlexandru Elisei 	size = PCI_DEV_CFG_SIZE_LEGACY - PCI_STD_HEADER_SIZEOF;
703c9888d95SJean-Philippe Brucker 	memcpy((void *)&pdev->hdr + pos, virt_hdr + pos, size);
7046078a454SJean-Philippe Brucker 
7056078a454SJean-Philippe Brucker 	return 0;
7066078a454SJean-Philippe Brucker }
7076078a454SJean-Philippe Brucker 
7086078a454SJean-Philippe Brucker static int vfio_pci_parse_cfg_space(struct vfio_device *vdev)
7096078a454SJean-Philippe Brucker {
710*e69b7663SAlexandru Elisei 	ssize_t sz = PCI_DEV_CFG_SIZE_LEGACY;
7116078a454SJean-Philippe Brucker 	struct vfio_region_info *info;
7126078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
7136078a454SJean-Philippe Brucker 
7146078a454SJean-Philippe Brucker 	if (vdev->info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX) {
7156078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "Config Space not found");
7166078a454SJean-Philippe Brucker 		return -ENODEV;
7176078a454SJean-Philippe Brucker 	}
7186078a454SJean-Philippe Brucker 
7196078a454SJean-Philippe Brucker 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
7206078a454SJean-Philippe Brucker 	*info = (struct vfio_region_info) {
7216078a454SJean-Philippe Brucker 			.argsz = sizeof(*info),
7226078a454SJean-Philippe Brucker 			.index = VFIO_PCI_CONFIG_REGION_INDEX,
7236078a454SJean-Philippe Brucker 	};
7246078a454SJean-Philippe Brucker 
7256078a454SJean-Philippe Brucker 	ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, info);
7266078a454SJean-Philippe Brucker 	if (!info->size) {
7276078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "Config Space has size zero?!");
7286078a454SJean-Philippe Brucker 		return -EINVAL;
7296078a454SJean-Philippe Brucker 	}
7306078a454SJean-Philippe Brucker 
731c9888d95SJean-Philippe Brucker 	/* Read standard headers and capabilities */
7326078a454SJean-Philippe Brucker 	if (pread(vdev->fd, &pdev->hdr, sz, info->offset) != sz) {
7336078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to read %zd bytes of Config Space", sz);
7346078a454SJean-Philippe Brucker 		return -EIO;
7356078a454SJean-Philippe Brucker 	}
7366078a454SJean-Philippe Brucker 
7376078a454SJean-Philippe Brucker 	/* Strip bit 7, which indicates a multifunction device */
7386078a454SJean-Philippe Brucker 	pdev->hdr.header_type &= 0x7f;
7396078a454SJean-Philippe Brucker 
7406078a454SJean-Philippe Brucker 	if (pdev->hdr.header_type != PCI_HEADER_TYPE_NORMAL) {
7416078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "unsupported header type %u",
7426078a454SJean-Philippe Brucker 			     pdev->hdr.header_type);
7436078a454SJean-Philippe Brucker 		return -EOPNOTSUPP;
7446078a454SJean-Philippe Brucker 	}
7456078a454SJean-Philippe Brucker 
746c9888d95SJean-Philippe Brucker 	if (pdev->hdr.irq_pin)
747c9888d95SJean-Philippe Brucker 		pdev->irq_modes |= VFIO_PCI_IRQ_MODE_INTX;
748c9888d95SJean-Philippe Brucker 
7496078a454SJean-Philippe Brucker 	vfio_pci_parse_caps(vdev);
7506078a454SJean-Philippe Brucker 
7516078a454SJean-Philippe Brucker 	return 0;
7526078a454SJean-Philippe Brucker }
7536078a454SJean-Philippe Brucker 
7546078a454SJean-Philippe Brucker static int vfio_pci_fixup_cfg_space(struct vfio_device *vdev)
7556078a454SJean-Philippe Brucker {
7566078a454SJean-Philippe Brucker 	int i;
7573665392aSAlexandru Elisei 	u64 base;
7586078a454SJean-Philippe Brucker 	ssize_t hdr_sz;
759c9888d95SJean-Philippe Brucker 	struct msix_cap *msix;
7606078a454SJean-Philippe Brucker 	struct vfio_region_info *info;
7616078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
7623665392aSAlexandru Elisei 	struct vfio_region *region;
7636078a454SJean-Philippe Brucker 
7646078a454SJean-Philippe Brucker 	/* Initialise the BARs */
7656078a454SJean-Philippe Brucker 	for (i = VFIO_PCI_BAR0_REGION_INDEX; i <= VFIO_PCI_BAR5_REGION_INDEX; ++i) {
7663665392aSAlexandru Elisei 		if ((u32)i == vdev->info.num_regions)
7673665392aSAlexandru Elisei 			break;
76882caa882SJean-Philippe Brucker 
7693665392aSAlexandru Elisei 		region = &vdev->regions[i];
77082caa882SJean-Philippe Brucker 		/* Construct a fake reg to match what we've mapped. */
77182caa882SJean-Philippe Brucker 		if (region->is_ioport) {
77282caa882SJean-Philippe Brucker 			base = (region->port_base & PCI_BASE_ADDRESS_IO_MASK) |
77382caa882SJean-Philippe Brucker 				PCI_BASE_ADDRESS_SPACE_IO;
77482caa882SJean-Philippe Brucker 		} else {
77582caa882SJean-Philippe Brucker 			base = (region->guest_phys_addr &
77682caa882SJean-Philippe Brucker 				PCI_BASE_ADDRESS_MEM_MASK) |
77782caa882SJean-Philippe Brucker 				PCI_BASE_ADDRESS_SPACE_MEMORY;
77882caa882SJean-Philippe Brucker 		}
77982caa882SJean-Philippe Brucker 
78082caa882SJean-Philippe Brucker 		pdev->hdr.bar[i] = base;
7816078a454SJean-Philippe Brucker 
7826078a454SJean-Philippe Brucker 		if (!base)
7836078a454SJean-Philippe Brucker 			continue;
7846078a454SJean-Philippe Brucker 
7856078a454SJean-Philippe Brucker 		pdev->hdr.bar_size[i] = region->info.size;
7866078a454SJean-Philippe Brucker 	}
7876078a454SJean-Philippe Brucker 
7886078a454SJean-Philippe Brucker 	/* I really can't be bothered to support cardbus. */
7896078a454SJean-Philippe Brucker 	pdev->hdr.card_bus = 0;
7906078a454SJean-Philippe Brucker 
7916078a454SJean-Philippe Brucker 	/*
7926078a454SJean-Philippe Brucker 	 * Nuke the expansion ROM for now. If we want to do this properly,
7936078a454SJean-Philippe Brucker 	 * we need to save its size somewhere and map it into the guest.
7946078a454SJean-Philippe Brucker 	 */
7956078a454SJean-Philippe Brucker 	pdev->hdr.exp_rom_bar = 0;
7966078a454SJean-Philippe Brucker 
797c9888d95SJean-Philippe Brucker 	/* Plumb in our fake MSI-X capability, if we have it. */
798c9888d95SJean-Philippe Brucker 	msix = pci_find_cap(&pdev->hdr, PCI_CAP_ID_MSIX);
799c9888d95SJean-Philippe Brucker 	if (msix) {
800c9888d95SJean-Philippe Brucker 		/* Add a shortcut to the PBA region for the MMIO handler */
801c9888d95SJean-Philippe Brucker 		int pba_index = VFIO_PCI_BAR0_REGION_INDEX + pdev->msix_pba.bar;
802c9888d95SJean-Philippe Brucker 		pdev->msix_pba.offset = vdev->regions[pba_index].info.offset +
803c9888d95SJean-Philippe Brucker 					(msix->pba_offset & PCI_MSIX_PBA_OFFSET);
804c9888d95SJean-Philippe Brucker 
805c9888d95SJean-Philippe Brucker 		/* Tidy up the capability */
806c9888d95SJean-Philippe Brucker 		msix->table_offset &= PCI_MSIX_TABLE_BIR;
807c9888d95SJean-Philippe Brucker 		msix->pba_offset &= PCI_MSIX_PBA_BIR;
808c9888d95SJean-Philippe Brucker 		if (pdev->msix_table.bar == pdev->msix_pba.bar)
809c9888d95SJean-Philippe Brucker 			msix->pba_offset |= pdev->msix_table.size &
810c9888d95SJean-Philippe Brucker 					    PCI_MSIX_PBA_OFFSET;
811c9888d95SJean-Philippe Brucker 	}
812c9888d95SJean-Philippe Brucker 
8136078a454SJean-Philippe Brucker 	/* Install our fake Configuration Space */
8146078a454SJean-Philippe Brucker 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
815*e69b7663SAlexandru Elisei 	/*
816*e69b7663SAlexandru Elisei 	 * We don't touch the extended configuration space; let's be cautious
817*e69b7663SAlexandru Elisei 	 * and not overwrite it all with zeros, or bad things might happen.
818*e69b7663SAlexandru Elisei 	 */
819*e69b7663SAlexandru Elisei 	hdr_sz = PCI_DEV_CFG_SIZE_LEGACY;
8206078a454SJean-Philippe Brucker 	if (pwrite(vdev->fd, &pdev->hdr, hdr_sz, info->offset) != hdr_sz) {
8216078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to write %zd bytes to Config Space",
8226078a454SJean-Philippe Brucker 			     hdr_sz);
8236078a454SJean-Philippe Brucker 		return -EIO;
8246078a454SJean-Philippe Brucker 	}
8256078a454SJean-Philippe Brucker 
8266078a454SJean-Philippe Brucker 	/* Register callbacks for cfg accesses */
8276078a454SJean-Philippe Brucker 	pdev->hdr.cfg_ops = (struct pci_config_operations) {
8286078a454SJean-Philippe Brucker 		.read	= vfio_pci_cfg_read,
8296078a454SJean-Philippe Brucker 		.write	= vfio_pci_cfg_write,
8306078a454SJean-Philippe Brucker 	};
8316078a454SJean-Philippe Brucker 
8326078a454SJean-Philippe Brucker 	pdev->hdr.irq_type = IRQ_TYPE_LEVEL_HIGH;
8336078a454SJean-Philippe Brucker 
8346078a454SJean-Philippe Brucker 	return 0;
8356078a454SJean-Philippe Brucker }
8366078a454SJean-Philippe Brucker 
837ed01a603SAlexandru Elisei static int vfio_pci_get_region_info(struct vfio_device *vdev, u32 index,
838ed01a603SAlexandru Elisei 				    struct vfio_region_info *info)
839ed01a603SAlexandru Elisei {
840ed01a603SAlexandru Elisei 	int ret;
841ed01a603SAlexandru Elisei 
842ed01a603SAlexandru Elisei 	*info = (struct vfio_region_info) {
843ed01a603SAlexandru Elisei 		.argsz = sizeof(*info),
844ed01a603SAlexandru Elisei 		.index = index,
845ed01a603SAlexandru Elisei 	};
846ed01a603SAlexandru Elisei 
847ed01a603SAlexandru Elisei 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, info);
848ed01a603SAlexandru Elisei 	if (ret) {
849ed01a603SAlexandru Elisei 		ret = -errno;
850ed01a603SAlexandru Elisei 		vfio_dev_err(vdev, "cannot get info for BAR %u", index);
851ed01a603SAlexandru Elisei 		return ret;
852ed01a603SAlexandru Elisei 	}
853ed01a603SAlexandru Elisei 
854ed01a603SAlexandru Elisei 	if (info->size && !is_power_of_two(info->size)) {
855ed01a603SAlexandru Elisei 		vfio_dev_err(vdev, "region is not power of two: 0x%llx",
856ed01a603SAlexandru Elisei 				info->size);
857ed01a603SAlexandru Elisei 		return -EINVAL;
858ed01a603SAlexandru Elisei 	}
859ed01a603SAlexandru Elisei 
860ed01a603SAlexandru Elisei 	return 0;
861ed01a603SAlexandru Elisei }
862ed01a603SAlexandru Elisei 
863ed01a603SAlexandru Elisei static int vfio_pci_create_msix_table(struct kvm *kvm, struct vfio_device *vdev)
864c9888d95SJean-Philippe Brucker {
865c9888d95SJean-Philippe Brucker 	int ret;
866c9888d95SJean-Philippe Brucker 	size_t i;
867ed01a603SAlexandru Elisei 	size_t map_size;
868c9888d95SJean-Philippe Brucker 	size_t nr_entries;
869c9888d95SJean-Philippe Brucker 	struct vfio_pci_msi_entry *entries;
870ed01a603SAlexandru Elisei 	struct vfio_pci_device *pdev = &vdev->pci;
871c9888d95SJean-Philippe Brucker 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
872c9888d95SJean-Philippe Brucker 	struct vfio_pci_msix_table *table = &pdev->msix_table;
873c9888d95SJean-Philippe Brucker 	struct msix_cap *msix = PCI_CAP(&pdev->hdr, pdev->msix.pos);
874ed01a603SAlexandru Elisei 	struct vfio_region_info info;
875c9888d95SJean-Philippe Brucker 
876c9888d95SJean-Philippe Brucker 	table->bar = msix->table_offset & PCI_MSIX_TABLE_BIR;
877c9888d95SJean-Philippe Brucker 	pba->bar = msix->pba_offset & PCI_MSIX_TABLE_BIR;
878c9888d95SJean-Philippe Brucker 
879c9888d95SJean-Philippe Brucker 	/*
880c9888d95SJean-Philippe Brucker 	 * KVM needs memory regions to be a multiple of, and aligned on, PAGE_SIZE.
881c9888d95SJean-Philippe Brucker 	 */
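	/* The MSI-X Table Size field encodes the number of entries minus one. */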
882c9888d95SJean-Philippe Brucker 	nr_entries = (msix->ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
883c9888d95SJean-Philippe Brucker 	table->size = ALIGN(nr_entries * PCI_MSIX_ENTRY_SIZE, PAGE_SIZE);
884c9888d95SJean-Philippe Brucker 	pba->size = ALIGN(DIV_ROUND_UP(nr_entries, 64), PAGE_SIZE);
885c9888d95SJean-Philippe Brucker 
886c9888d95SJean-Philippe Brucker 	entries = calloc(nr_entries, sizeof(struct vfio_pci_msi_entry));
887c9888d95SJean-Philippe Brucker 	if (!entries)
888c9888d95SJean-Philippe Brucker 		return -ENOMEM;
889c9888d95SJean-Philippe Brucker 
890c9888d95SJean-Philippe Brucker 	for (i = 0; i < nr_entries; i++)
891c9888d95SJean-Philippe Brucker 		entries[i].config.ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
892c9888d95SJean-Philippe Brucker 
893ed01a603SAlexandru Elisei 	ret = vfio_pci_get_region_info(vdev, table->bar, &info);
894ed01a603SAlexandru Elisei 	if (ret)
895ed01a603SAlexandru Elisei 		return ret;
896ed01a603SAlexandru Elisei 	if (!info.size)
897ed01a603SAlexandru Elisei 		return -EINVAL;
898ed01a603SAlexandru Elisei 	map_size = info.size;
899ed01a603SAlexandru Elisei 
900ed01a603SAlexandru Elisei 	if (table->bar != pba->bar) {
901ed01a603SAlexandru Elisei 		ret = vfio_pci_get_region_info(vdev, pba->bar, &info);
902ed01a603SAlexandru Elisei 		if (ret)
903ed01a603SAlexandru Elisei 			return ret;
904ed01a603SAlexandru Elisei 		if (!info.size)
905ed01a603SAlexandru Elisei 			return -EINVAL;
906ed01a603SAlexandru Elisei 		map_size += info.size;
907ed01a603SAlexandru Elisei 	}
908ed01a603SAlexandru Elisei 
909c9888d95SJean-Philippe Brucker 	/*
910c9888d95SJean-Philippe Brucker 	 * To ease MSI-X cap configuration in case the table and pending array
911c9888d95SJean-Philippe Brucker 	 * share the same BAR, collapse them into one allocation. The sizes of
912c9888d95SJean-Philippe Brucker 	 * the BAR regions must be powers of two.
913c9888d95SJean-Philippe Brucker 	 */
914ed01a603SAlexandru Elisei 	map_size = ALIGN(map_size, PAGE_SIZE);
915ed01a603SAlexandru Elisei 	table->guest_phys_addr = pci_get_mmio_block(map_size);
916c9888d95SJean-Philippe Brucker 	if (!table->guest_phys_addr) {
917ed01a603SAlexandru Elisei 		pr_err("cannot allocate MMIO space");
918c9888d95SJean-Philippe Brucker 		ret = -ENOMEM;
919c9888d95SJean-Philippe Brucker 		goto out_free;
920c9888d95SJean-Philippe Brucker 	}
921c9888d95SJean-Philippe Brucker 
922c9888d95SJean-Philippe Brucker 	/*
923c9888d95SJean-Philippe Brucker 	 * We could map the physical PBA directly into the guest, but it's
924c9888d95SJean-Philippe Brucker 	 * likely smaller than a page, and we can only hand full pages to the
925c9888d95SJean-Philippe Brucker 	 * guest. Even though the PCI spec disallows sharing a page used for
926c9888d95SJean-Philippe Brucker 	 * MSI-X with any other resource, it allows sharing the same page
927c9888d95SJean-Philippe Brucker 	 * between the MSI-X table and the PBA. For the sake of isolation, create a
928c9888d95SJean-Philippe Brucker 	 * virtual PBA.
929c9888d95SJean-Philippe Brucker 	 */
9305a8e4f25SAlexandru Elisei 	pba->guest_phys_addr = table->guest_phys_addr + table->size;
931c9888d95SJean-Philippe Brucker 
932c9888d95SJean-Philippe Brucker 	pdev->msix.entries = entries;
933c9888d95SJean-Philippe Brucker 	pdev->msix.nr_entries = nr_entries;
934c9888d95SJean-Philippe Brucker 
935c9888d95SJean-Philippe Brucker 	return 0;
936c9888d95SJean-Philippe Brucker 
937c9888d95SJean-Philippe Brucker out_free:
938c9888d95SJean-Philippe Brucker 	free(entries);
939c9888d95SJean-Philippe Brucker 
940c9888d95SJean-Philippe Brucker 	return ret;
941c9888d95SJean-Philippe Brucker }
942c9888d95SJean-Philippe Brucker 
9438dd28afeSJean-Philippe Brucker static int vfio_pci_create_msi_cap(struct kvm *kvm, struct vfio_pci_device *pdev)
9448dd28afeSJean-Philippe Brucker {
9458dd28afeSJean-Philippe Brucker 	struct msi_cap_64 *cap = PCI_CAP(&pdev->hdr, pdev->msi.pos);
9468dd28afeSJean-Philippe Brucker 
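	/*
	 * The Multiple Message Capable field (bits 3:1 of Message Control)
	 * encodes log2 of the number of vectors the device supports.
	 */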
9478dd28afeSJean-Philippe Brucker 	pdev->msi.nr_entries = 1 << ((cap->ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
9488dd28afeSJean-Philippe Brucker 	pdev->msi.entries = calloc(pdev->msi.nr_entries,
9498dd28afeSJean-Philippe Brucker 				   sizeof(struct vfio_pci_msi_entry));
9508dd28afeSJean-Philippe Brucker 	if (!pdev->msi.entries)
9518dd28afeSJean-Philippe Brucker 		return -ENOMEM;
9528dd28afeSJean-Philippe Brucker 
9538dd28afeSJean-Philippe Brucker 	return 0;
9548dd28afeSJean-Philippe Brucker }
9558dd28afeSJean-Philippe Brucker 
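/*
 * Hedged illustration of the decode above, assuming the standard encoding of
 * the MSI Message Control register: the Multiple Message Capable field sits
 * in bits 3:1 (PCI_MSI_FLAGS_QMASK) and holds log2 of the vector count, so a
 * field value of 5 advertises 1 << 5 = 32 vectors. Hypothetical helper,
 * unused by the code in this file.
 */
static inline unsigned int vfio_pci_msi_nr_vectors(u16 ctrl)
{
	/* Same computation as vfio_pci_create_msi_cap() above */
	return 1U << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
}
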
9566078a454SJean-Philippe Brucker static int vfio_pci_configure_bar(struct kvm *kvm, struct vfio_device *vdev,
9576078a454SJean-Philippe Brucker 				  size_t nr)
9586078a454SJean-Philippe Brucker {
9596078a454SJean-Philippe Brucker 	int ret;
96082caa882SJean-Philippe Brucker 	u32 bar;
9616078a454SJean-Philippe Brucker 	size_t map_size;
962c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
9633665392aSAlexandru Elisei 	struct vfio_region *region;
9646078a454SJean-Philippe Brucker 
9656078a454SJean-Philippe Brucker 	if (nr >= vdev->info.num_regions)
9666078a454SJean-Philippe Brucker 		return 0;
9676078a454SJean-Philippe Brucker 
9683665392aSAlexandru Elisei 	region = &vdev->regions[nr];
96982caa882SJean-Philippe Brucker 	bar = pdev->hdr.bar[nr];
97082caa882SJean-Philippe Brucker 
97182caa882SJean-Philippe Brucker 	region->vdev = vdev;
97282caa882SJean-Philippe Brucker 	region->is_ioport = !!(bar & PCI_BASE_ADDRESS_SPACE_IO);
9736078a454SJean-Philippe Brucker 
974ed01a603SAlexandru Elisei 	ret = vfio_pci_get_region_info(vdev, nr, &region->info);
975ed01a603SAlexandru Elisei 	if (ret)
9766078a454SJean-Philippe Brucker 		return ret;
9776078a454SJean-Philippe Brucker 
9786078a454SJean-Philippe Brucker 	/* Ignore invalid or unimplemented regions */
9796078a454SJean-Philippe Brucker 	if (!region->info.size)
9806078a454SJean-Philippe Brucker 		return 0;
9816078a454SJean-Philippe Brucker 
982c9888d95SJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
983c9888d95SJean-Philippe Brucker 		/* Trap and emulate MSI-X table */
984c9888d95SJean-Philippe Brucker 		if (nr == pdev->msix_table.bar) {
985c9888d95SJean-Philippe Brucker 			region->guest_phys_addr = pdev->msix_table.guest_phys_addr;
986c9888d95SJean-Philippe Brucker 			return 0;
987c9888d95SJean-Philippe Brucker 		} else if (nr == pdev->msix_pba.bar) {
988c9888d95SJean-Philippe Brucker 			region->guest_phys_addr = pdev->msix_pba.guest_phys_addr;
989c9888d95SJean-Philippe Brucker 			return 0;
990c9888d95SJean-Philippe Brucker 		}
991c9888d95SJean-Philippe Brucker 	}
992c9888d95SJean-Philippe Brucker 
993a05e576fSAlexandru Elisei 	if (region->is_ioport) {
994a05e576fSAlexandru Elisei 		region->port_base = pci_get_io_port_block(region->info.size);
995a05e576fSAlexandru Elisei 	} else {
9966078a454SJean-Philippe Brucker 		/* Grab some MMIO space in the guest */
9976078a454SJean-Philippe Brucker 		map_size = ALIGN(region->info.size, PAGE_SIZE);
998854aa2efSJulien Thierry 		region->guest_phys_addr = pci_get_mmio_block(map_size);
99982caa882SJean-Philippe Brucker 	}
10006078a454SJean-Philippe Brucker 
10016078a454SJean-Philippe Brucker 	return 0;
10026078a454SJean-Philippe Brucker }
10036078a454SJean-Philippe Brucker 
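/*
 * Hedged sketch of the BAR decode used above: bit 0 of a BAR distinguishes
 * I/O port space from memory space, so the low bits are flags rather than
 * address bits. This hypothetical helper is equivalent to the is_ioport
 * assignment in vfio_pci_configure_bar() and is not used elsewhere.
 */
static inline bool vfio_pci_bar_is_ioport(u32 bar)
{
	return (bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO;
}
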
10046078a454SJean-Philippe Brucker static int vfio_pci_configure_dev_regions(struct kvm *kvm,
10056078a454SJean-Philippe Brucker 					  struct vfio_device *vdev)
10066078a454SJean-Philippe Brucker {
10076078a454SJean-Philippe Brucker 	int ret;
10086078a454SJean-Philippe Brucker 	u32 bar;
10096078a454SJean-Philippe Brucker 	size_t i;
10106078a454SJean-Philippe Brucker 	bool is_64bit = false;
10116078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
10126078a454SJean-Philippe Brucker 
10136078a454SJean-Philippe Brucker 	ret = vfio_pci_parse_cfg_space(vdev);
10146078a454SJean-Philippe Brucker 	if (ret)
10156078a454SJean-Philippe Brucker 		return ret;
10166078a454SJean-Philippe Brucker 
1017c9888d95SJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
1018ed01a603SAlexandru Elisei 		ret = vfio_pci_create_msix_table(kvm, vdev);
1019c9888d95SJean-Philippe Brucker 		if (ret)
1020c9888d95SJean-Philippe Brucker 			return ret;
1021c9888d95SJean-Philippe Brucker 	}
1022c9888d95SJean-Philippe Brucker 
10238dd28afeSJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI) {
10248dd28afeSJean-Philippe Brucker 		ret = vfio_pci_create_msi_cap(kvm, pdev);
10258dd28afeSJean-Philippe Brucker 		if (ret)
10268dd28afeSJean-Philippe Brucker 			return ret;
10278dd28afeSJean-Philippe Brucker 	}
10288dd28afeSJean-Philippe Brucker 
10296078a454SJean-Philippe Brucker 	for (i = VFIO_PCI_BAR0_REGION_INDEX; i <= VFIO_PCI_BAR5_REGION_INDEX; ++i) {
10306078a454SJean-Philippe Brucker 		/* Ignore top half of 64-bit BAR */
103184998f21SAlexandru Elisei 		if (is_64bit) {
103284998f21SAlexandru Elisei 			is_64bit = false;
10336078a454SJean-Philippe Brucker 			continue;
103484998f21SAlexandru Elisei 		}
10356078a454SJean-Philippe Brucker 
10366078a454SJean-Philippe Brucker 		ret = vfio_pci_configure_bar(kvm, vdev, i);
10376078a454SJean-Philippe Brucker 		if (ret)
10386078a454SJean-Philippe Brucker 			return ret;
10396078a454SJean-Philippe Brucker 
10406078a454SJean-Philippe Brucker 		bar = pdev->hdr.bar[i];
10416078a454SJean-Philippe Brucker 		is_64bit = (bar & PCI_BASE_ADDRESS_SPACE) ==
10426078a454SJean-Philippe Brucker 			   PCI_BASE_ADDRESS_SPACE_MEMORY &&
10436078a454SJean-Philippe Brucker 			   bar & PCI_BASE_ADDRESS_MEM_TYPE_64;
10446078a454SJean-Philippe Brucker 	}
10456078a454SJean-Philippe Brucker 
10466078a454SJean-Philippe Brucker 	/* We've configured the BARs, fake up a Configuration Space */
10475a8e4f25SAlexandru Elisei 	ret = vfio_pci_fixup_cfg_space(vdev);
10485a8e4f25SAlexandru Elisei 	if (ret)
10495a8e4f25SAlexandru Elisei 		return ret;
10505a8e4f25SAlexandru Elisei 
10515a8e4f25SAlexandru Elisei 	return pci__register_bar_regions(kvm, &pdev->hdr, vfio_pci_bar_activate,
10525a8e4f25SAlexandru Elisei 					 vfio_pci_bar_deactivate, vdev);
10536078a454SJean-Philippe Brucker }
10546078a454SJean-Philippe Brucker 
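/*
 * Hedged sketch: the loop above skips the upper half of a 64-bit memory BAR,
 * which is a memory BAR (bit 0 clear) whose type bits 2:1 read 0b10
 * (PCI_BASE_ADDRESS_MEM_TYPE_64). The hypothetical helper below restates that
 * test using the full type mask from the standard pci_regs.h constants; it is
 * not part of the original code.
 */
static inline bool vfio_pci_bar_is_64bit(u32 bar)
{
	return (bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY &&
	       (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64;
}
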
1055c9888d95SJean-Philippe Brucker /*
1056c9888d95SJean-Philippe Brucker  * Attempt to update the FD limit if opening an eventfd for each IRQ vector
1057c9888d95SJean-Philippe Brucker  * would hit the limit, which is likely to happen when a device uses 2048 MSIs.
1058c9888d95SJean-Philippe Brucker  */
1059c9888d95SJean-Philippe Brucker static int vfio_pci_reserve_irq_fds(size_t num)
1060c9888d95SJean-Philippe Brucker {
1061c9888d95SJean-Philippe Brucker 	/*
1062c9888d95SJean-Philippe Brucker 	 * I counted around 27 fds under normal load. Let's add 100 for good
1063c9888d95SJean-Philippe Brucker 	 * measure.
1064c9888d95SJean-Philippe Brucker 	 */
1065c9888d95SJean-Philippe Brucker 	static size_t needed = 128;
1066c9888d95SJean-Philippe Brucker 	struct rlimit fd_limit, new_limit;
1067c9888d95SJean-Philippe Brucker 
1068c9888d95SJean-Philippe Brucker 	needed += num;
1069c9888d95SJean-Philippe Brucker 
1070c9888d95SJean-Philippe Brucker 	if (getrlimit(RLIMIT_NOFILE, &fd_limit)) {
1071c9888d95SJean-Philippe Brucker 		perror("getrlimit(RLIMIT_NOFILE)");
1072c9888d95SJean-Philippe Brucker 		return 0;
1073c9888d95SJean-Philippe Brucker 	}
1074c9888d95SJean-Philippe Brucker 
1075c9888d95SJean-Philippe Brucker 	if (fd_limit.rlim_cur >= needed)
1076c9888d95SJean-Philippe Brucker 		return 0;
1077c9888d95SJean-Philippe Brucker 
1078c9888d95SJean-Philippe Brucker 	new_limit.rlim_cur = needed;
1079c9888d95SJean-Philippe Brucker 
1080c9888d95SJean-Philippe Brucker 	if (fd_limit.rlim_max < needed)
1081c9888d95SJean-Philippe Brucker 		/* Try to bump hard limit (root only) */
1082c9888d95SJean-Philippe Brucker 		new_limit.rlim_max = needed;
1083c9888d95SJean-Philippe Brucker 	else
1084c9888d95SJean-Philippe Brucker 		new_limit.rlim_max = fd_limit.rlim_max;
1085c9888d95SJean-Philippe Brucker 
1086c9888d95SJean-Philippe Brucker 	if (setrlimit(RLIMIT_NOFILE, &new_limit)) {
1087c9888d95SJean-Philippe Brucker 		perror("setrlimit(RLIMIT_NOFILE)");
1088c9888d95SJean-Philippe Brucker 		pr_warning("not enough FDs for full MSI-X support (estimated need: %zu)",
1089c9888d95SJean-Philippe Brucker 			   (size_t)(needed - fd_limit.rlim_cur));
1090c9888d95SJean-Philippe Brucker 	}
1091c9888d95SJean-Philippe Brucker 
1092c9888d95SJean-Philippe Brucker 	return 0;
1093c9888d95SJean-Philippe Brucker }
1094c9888d95SJean-Philippe Brucker 
1095c9888d95SJean-Philippe Brucker static int vfio_pci_init_msis(struct kvm *kvm, struct vfio_device *vdev,
1096c9888d95SJean-Philippe Brucker 			     struct vfio_pci_msi_common *msis)
1097c9888d95SJean-Philippe Brucker {
1098c9888d95SJean-Philippe Brucker 	int ret;
1099c9888d95SJean-Philippe Brucker 	size_t i;
1100c9888d95SJean-Philippe Brucker 	int *eventfds;
1101c9888d95SJean-Philippe Brucker 	size_t irq_set_size;
1102c9888d95SJean-Philippe Brucker 	struct vfio_pci_msi_entry *entry;
1103c9888d95SJean-Philippe Brucker 	size_t nr_entries = msis->nr_entries;
1104c9888d95SJean-Philippe Brucker 
1105c9888d95SJean-Philippe Brucker 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &msis->info);
110609533d3cSAndre Przywara 	if (ret || msis->info.count == 0) {
1107c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "no MSI reported by VFIO");
1108c9888d95SJean-Philippe Brucker 		return -ENODEV;
1109c9888d95SJean-Philippe Brucker 	}
1110c9888d95SJean-Philippe Brucker 
1111c9888d95SJean-Philippe Brucker 	if (!(msis->info.flags & VFIO_IRQ_INFO_EVENTFD)) {
1112c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "interrupt not EVENTFD capable");
1113c9888d95SJean-Philippe Brucker 		return -EINVAL;
1114c9888d95SJean-Philippe Brucker 	}
1115c9888d95SJean-Philippe Brucker 
1116c9888d95SJean-Philippe Brucker 	if (msis->info.count != nr_entries) {
1117c9888d95SJean-Philippe Brucker 		vfio_dev_err(vdev, "invalid number of MSIs reported by VFIO");
1118c9888d95SJean-Philippe Brucker 		return -EINVAL;
1119c9888d95SJean-Philippe Brucker 	}
1120c9888d95SJean-Philippe Brucker 
1121c9888d95SJean-Philippe Brucker 	mutex_init(&msis->mutex);
1122c9888d95SJean-Philippe Brucker 
1123c9888d95SJean-Philippe Brucker 	vfio_pci_reserve_irq_fds(nr_entries);
1124c9888d95SJean-Philippe Brucker 
1125c9888d95SJean-Philippe Brucker 	irq_set_size = sizeof(struct vfio_irq_set) + nr_entries * sizeof(int);
1126c9888d95SJean-Philippe Brucker 	msis->irq_set = malloc(irq_set_size);
1127c9888d95SJean-Philippe Brucker 	if (!msis->irq_set)
1128c9888d95SJean-Philippe Brucker 		return -ENOMEM;
1129c9888d95SJean-Philippe Brucker 
1130c9888d95SJean-Philippe Brucker 	*msis->irq_set = (struct vfio_irq_set) {
1131c9888d95SJean-Philippe Brucker 		.argsz	= irq_set_size,
1132c9888d95SJean-Philippe Brucker 		.flags 	= VFIO_IRQ_SET_DATA_EVENTFD |
1133c9888d95SJean-Philippe Brucker 			  VFIO_IRQ_SET_ACTION_TRIGGER,
1134c9888d95SJean-Philippe Brucker 		.index 	= msis->info.index,
1135c9888d95SJean-Philippe Brucker 		.start 	= 0,
1136c9888d95SJean-Philippe Brucker 		.count 	= nr_entries,
1137c9888d95SJean-Philippe Brucker 	};
1138c9888d95SJean-Philippe Brucker 
1139c9888d95SJean-Philippe Brucker 	eventfds = (void *)msis->irq_set + sizeof(struct vfio_irq_set);
1140c9888d95SJean-Philippe Brucker 
1141c9888d95SJean-Philippe Brucker 	for (i = 0; i < nr_entries; i++) {
1142c9888d95SJean-Philippe Brucker 		entry = &msis->entries[i];
1143c9888d95SJean-Philippe Brucker 		entry->gsi = -1;
1144c9888d95SJean-Philippe Brucker 		entry->eventfd = -1;
1145c9888d95SJean-Philippe Brucker 		msi_set_masked(entry->virt_state, true);
1146c9888d95SJean-Philippe Brucker 		msi_set_masked(entry->phys_state, true);
1147c9888d95SJean-Philippe Brucker 		eventfds[i] = -1;
1148c9888d95SJean-Philippe Brucker 	}
1149c9888d95SJean-Philippe Brucker 
1150c9888d95SJean-Philippe Brucker 	return 0;
1151c9888d95SJean-Philippe Brucker }
1152c9888d95SJean-Philippe Brucker 
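/*
 * Hedged sketch of the layout initialised above: msis->irq_set is a
 * variable-length vfio_irq_set whose payload is one eventfd slot per vector,
 * stored immediately after the header. The hypothetical accessor below only
 * makes that pointer arithmetic explicit; it is not used by the driver code.
 */
static inline int *vfio_pci_msi_eventfd_slot(struct vfio_pci_msi_common *msis,
					     size_t vector)
{
	int *eventfds = (void *)msis->irq_set + sizeof(struct vfio_irq_set);

	return &eventfds[vector];
}
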
1153c9888d95SJean-Philippe Brucker static void vfio_pci_disable_intx(struct kvm *kvm, struct vfio_device *vdev)
1154c9888d95SJean-Philippe Brucker {
1155c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
1156c9888d95SJean-Philippe Brucker 	int gsi = pdev->intx_gsi;
1157c9888d95SJean-Philippe Brucker 	struct vfio_irq_set irq_set = {
1158c9888d95SJean-Philippe Brucker 		.argsz	= sizeof(irq_set),
1159c9888d95SJean-Philippe Brucker 		.flags	= VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
1160c9888d95SJean-Philippe Brucker 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
1161c9888d95SJean-Philippe Brucker 	};
1162c9888d95SJean-Philippe Brucker 
11637302327aSLeo Yan 	if (pdev->intx_fd == -1)
11647302327aSLeo Yan 		return;
11657302327aSLeo Yan 
1166c9888d95SJean-Philippe Brucker 	pr_debug("user requested MSI, disabling INTx %d", gsi);
1167c9888d95SJean-Philippe Brucker 
1168c9888d95SJean-Philippe Brucker 	ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
1169c9888d95SJean-Philippe Brucker 	irq__del_irqfd(kvm, gsi, pdev->intx_fd);
1170c9888d95SJean-Philippe Brucker 
1171c9888d95SJean-Philippe Brucker 	close(pdev->intx_fd);
1172a1ff6f87SLeo Yan 	close(pdev->unmask_fd);
11737302327aSLeo Yan 	pdev->intx_fd = -1;
1174c9888d95SJean-Philippe Brucker }
1175c9888d95SJean-Philippe Brucker 
11766078a454SJean-Philippe Brucker static int vfio_pci_enable_intx(struct kvm *kvm, struct vfio_device *vdev)
11776078a454SJean-Philippe Brucker {
11786078a454SJean-Philippe Brucker 	int ret;
11796078a454SJean-Philippe Brucker 	int trigger_fd, unmask_fd;
1180a3704b91SAndre Przywara 	union vfio_irq_eventfd	trigger;
1181a3704b91SAndre Przywara 	union vfio_irq_eventfd	unmask;
11826078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
118312bd7a16SLeo Yan 	int gsi = pdev->intx_gsi;
11846078a454SJean-Philippe Brucker 
11857302327aSLeo Yan 	if (pdev->intx_fd != -1)
11867302327aSLeo Yan 		return 0;
11877302327aSLeo Yan 
11886078a454SJean-Philippe Brucker 	/*
11896078a454SJean-Philippe Brucker 	 * PCI IRQ is level-triggered, so we use two eventfds. trigger_fd
11906078a454SJean-Philippe Brucker 	 * signals an interrupt from host to guest, and unmask_fd signals the
11916078a454SJean-Philippe Brucker 	 * deassertion of the line from guest to host.
11926078a454SJean-Philippe Brucker 	 */
11936078a454SJean-Philippe Brucker 	trigger_fd = eventfd(0, 0);
11946078a454SJean-Philippe Brucker 	if (trigger_fd < 0) {
11956078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to create trigger eventfd");
11966078a454SJean-Philippe Brucker 		return trigger_fd;
11976078a454SJean-Philippe Brucker 	}
11986078a454SJean-Philippe Brucker 
11996078a454SJean-Philippe Brucker 	unmask_fd = eventfd(0, 0);
12006078a454SJean-Philippe Brucker 	if (unmask_fd < 0) {
12016078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to create unmask eventfd");
12026078a454SJean-Philippe Brucker 		close(trigger_fd);
12036078a454SJean-Philippe Brucker 		return unmask_fd;
12046078a454SJean-Philippe Brucker 	}
12056078a454SJean-Philippe Brucker 
12066078a454SJean-Philippe Brucker 	ret = irq__add_irqfd(kvm, gsi, trigger_fd, unmask_fd);
12076078a454SJean-Philippe Brucker 	if (ret)
12086078a454SJean-Philippe Brucker 		goto err_close;
12096078a454SJean-Philippe Brucker 
12106078a454SJean-Philippe Brucker 	trigger.irq = (struct vfio_irq_set) {
12116078a454SJean-Philippe Brucker 		.argsz	= sizeof(trigger),
12126078a454SJean-Philippe Brucker 		.flags	= VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
12136078a454SJean-Philippe Brucker 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
12146078a454SJean-Philippe Brucker 		.start	= 0,
12156078a454SJean-Philippe Brucker 		.count	= 1,
12166078a454SJean-Philippe Brucker 	};
1217a3704b91SAndre Przywara 	set_vfio_irq_eventd_payload(&trigger, trigger_fd);
12186078a454SJean-Philippe Brucker 
12196078a454SJean-Philippe Brucker 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &trigger);
12206078a454SJean-Philippe Brucker 	if (ret < 0) {
12216078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to setup VFIO IRQ");
12226078a454SJean-Philippe Brucker 		goto err_delete_line;
12236078a454SJean-Philippe Brucker 	}
12246078a454SJean-Philippe Brucker 
12256078a454SJean-Philippe Brucker 	unmask.irq = (struct vfio_irq_set) {
12266078a454SJean-Philippe Brucker 		.argsz	= sizeof(unmask),
12276078a454SJean-Philippe Brucker 		.flags	= VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK,
12286078a454SJean-Philippe Brucker 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
12296078a454SJean-Philippe Brucker 		.start	= 0,
12306078a454SJean-Philippe Brucker 		.count	= 1,
12316078a454SJean-Philippe Brucker 	};
1232a3704b91SAndre Przywara 	set_vfio_irq_eventd_payload(&unmask, unmask_fd);
12336078a454SJean-Philippe Brucker 
12346078a454SJean-Philippe Brucker 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &unmask);
12356078a454SJean-Philippe Brucker 	if (ret < 0) {
12366078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to setup unmask IRQ");
12376078a454SJean-Philippe Brucker 		goto err_remove_event;
12386078a454SJean-Philippe Brucker 	}
12396078a454SJean-Philippe Brucker 
1240c9888d95SJean-Philippe Brucker 	pdev->intx_fd = trigger_fd;
1241a1ff6f87SLeo Yan 	pdev->unmask_fd = unmask_fd;
1242c9888d95SJean-Philippe Brucker 
12436078a454SJean-Philippe Brucker 	return 0;
12446078a454SJean-Philippe Brucker 
12456078a454SJean-Philippe Brucker err_remove_event:
12466078a454SJean-Philippe Brucker 	/* Remove trigger event */
12476078a454SJean-Philippe Brucker 	trigger.irq.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
12486078a454SJean-Philippe Brucker 	trigger.irq.count = 0;
12496078a454SJean-Philippe Brucker 	ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &trigger);
12506078a454SJean-Philippe Brucker 
12516078a454SJean-Philippe Brucker err_delete_line:
12526078a454SJean-Philippe Brucker 	irq__del_irqfd(kvm, gsi, trigger_fd);
12536078a454SJean-Philippe Brucker 
12546078a454SJean-Philippe Brucker err_close:
12556078a454SJean-Philippe Brucker 	close(trigger_fd);
12566078a454SJean-Philippe Brucker 	close(unmask_fd);
12576078a454SJean-Philippe Brucker 	return ret;
12586078a454SJean-Philippe Brucker }
12596078a454SJean-Philippe Brucker 
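/*
 * Hedged illustration of the resulting INTx flow: VFIO signals trigger_fd
 * when the physical line fires and KVM injects the level interrupt; on guest
 * EOI, KVM signals unmask_fd so VFIO can unmask the line again. Writing to
 * the trigger eventfd by hand, as in this hypothetical debug helper, would
 * inject a spurious INTx; it exists only to show which end of the pipe
 * userspace holds and is not part of the original code.
 */
static inline void vfio_pci_debug_pulse_intx(struct vfio_pci_device *pdev)
{
	if (pdev->intx_fd != -1 && eventfd_write(pdev->intx_fd, 1) != 0)
		pr_warning("failed to signal INTx trigger eventfd");
}
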
126012bd7a16SLeo Yan static int vfio_pci_init_intx(struct kvm *kvm, struct vfio_device *vdev)
126112bd7a16SLeo Yan {
126212bd7a16SLeo Yan 	int ret;
126312bd7a16SLeo Yan 	struct vfio_pci_device *pdev = &vdev->pci;
126412bd7a16SLeo Yan 	struct vfio_irq_info irq_info = {
126512bd7a16SLeo Yan 		.argsz = sizeof(irq_info),
126612bd7a16SLeo Yan 		.index = VFIO_PCI_INTX_IRQ_INDEX,
126712bd7a16SLeo Yan 	};
126812bd7a16SLeo Yan 
126912bd7a16SLeo Yan 	vfio_pci_reserve_irq_fds(2);
127012bd7a16SLeo Yan 
127112bd7a16SLeo Yan 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
127212bd7a16SLeo Yan 	if (ret || irq_info.count == 0) {
127312bd7a16SLeo Yan 		vfio_dev_err(vdev, "no INTx reported by VFIO");
127412bd7a16SLeo Yan 		return -ENODEV;
127512bd7a16SLeo Yan 	}
127612bd7a16SLeo Yan 
127712bd7a16SLeo Yan 	if (!(irq_info.flags & VFIO_IRQ_INFO_EVENTFD)) {
127812bd7a16SLeo Yan 		vfio_dev_err(vdev, "interrupt not eventfd capable");
127912bd7a16SLeo Yan 		return -EINVAL;
128012bd7a16SLeo Yan 	}
128112bd7a16SLeo Yan 
128212bd7a16SLeo Yan 	if (!(irq_info.flags & VFIO_IRQ_INFO_AUTOMASKED)) {
128312bd7a16SLeo Yan 		vfio_dev_err(vdev, "INTx interrupt not AUTOMASKED");
128412bd7a16SLeo Yan 		return -EINVAL;
128512bd7a16SLeo Yan 	}
128612bd7a16SLeo Yan 
128712bd7a16SLeo Yan 	/* Guest is going to overwrite our irq_line... */
128812bd7a16SLeo Yan 	pdev->intx_gsi = pdev->hdr.irq_line - KVM_IRQ_OFFSET;
128912bd7a16SLeo Yan 
12907302327aSLeo Yan 	pdev->intx_fd = -1;
12917302327aSLeo Yan 
129212bd7a16SLeo Yan 	return 0;
129312bd7a16SLeo Yan }
129412bd7a16SLeo Yan 
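/*
 * Hedged note on the GSI computed above: pci__assign_irq() leaves a legacy
 * interrupt line in config space, and kvmtool's GSI numbering is offset from
 * it by KVM_IRQ_OFFSET. The hypothetical helper below merely reverses that
 * arithmetic, e.g. for logging, and is not used anywhere in this file.
 */
static inline int vfio_pci_intx_line(struct vfio_pci_device *pdev)
{
	return pdev->intx_gsi + KVM_IRQ_OFFSET;
}
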
12956078a454SJean-Philippe Brucker static int vfio_pci_configure_dev_irqs(struct kvm *kvm, struct vfio_device *vdev)
12966078a454SJean-Philippe Brucker {
1297c9888d95SJean-Philippe Brucker 	int ret = 0;
12986078a454SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
12996078a454SJean-Philippe Brucker 
1300c9888d95SJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
1301c9888d95SJean-Philippe Brucker 		pdev->msix.info = (struct vfio_irq_info) {
1302c9888d95SJean-Philippe Brucker 			.argsz = sizeof(pdev->msix.info),
1303c9888d95SJean-Philippe Brucker 			.index = VFIO_PCI_MSIX_IRQ_INDEX,
13046078a454SJean-Philippe Brucker 		};
1305c9888d95SJean-Philippe Brucker 		ret = vfio_pci_init_msis(kvm, vdev, &pdev->msix);
1306c9888d95SJean-Philippe Brucker 		if (ret)
1307c9888d95SJean-Philippe Brucker 			return ret;
13086078a454SJean-Philippe Brucker 	}
13096078a454SJean-Philippe Brucker 
13108dd28afeSJean-Philippe Brucker 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI) {
13118dd28afeSJean-Philippe Brucker 		pdev->msi.info = (struct vfio_irq_info) {
13128dd28afeSJean-Philippe Brucker 			.argsz = sizeof(pdev->msi.info),
13138dd28afeSJean-Philippe Brucker 			.index = VFIO_PCI_MSI_IRQ_INDEX,
13148dd28afeSJean-Philippe Brucker 		};
13158dd28afeSJean-Philippe Brucker 		ret = vfio_pci_init_msis(kvm, vdev, &pdev->msi);
13168dd28afeSJean-Philippe Brucker 		if (ret)
13178dd28afeSJean-Philippe Brucker 			return ret;
13188dd28afeSJean-Philippe Brucker 	}
13198dd28afeSJean-Philippe Brucker 
132012bd7a16SLeo Yan 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX) {
1321c0c45eedSAndre Przywara 		pci__assign_irq(&vdev->pci.hdr);
1322c0c45eedSAndre Przywara 
132312bd7a16SLeo Yan 		ret = vfio_pci_init_intx(kvm, vdev);
132412bd7a16SLeo Yan 		if (ret)
132512bd7a16SLeo Yan 			return ret;
132612bd7a16SLeo Yan 
1327c9888d95SJean-Philippe Brucker 		ret = vfio_pci_enable_intx(kvm, vdev);
132812bd7a16SLeo Yan 	}
1329c9888d95SJean-Philippe Brucker 
1330c9888d95SJean-Philippe Brucker 	return ret;
13316078a454SJean-Philippe Brucker }
13326078a454SJean-Philippe Brucker 
13336078a454SJean-Philippe Brucker int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *vdev)
13346078a454SJean-Philippe Brucker {
13356078a454SJean-Philippe Brucker 	int ret;
13366078a454SJean-Philippe Brucker 
13376078a454SJean-Philippe Brucker 	ret = vfio_pci_configure_dev_regions(kvm, vdev);
13386078a454SJean-Philippe Brucker 	if (ret) {
13396078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to configure regions");
13406078a454SJean-Philippe Brucker 		return ret;
13416078a454SJean-Philippe Brucker 	}
13426078a454SJean-Philippe Brucker 
13436078a454SJean-Philippe Brucker 	vdev->dev_hdr = (struct device_header) {
13446078a454SJean-Philippe Brucker 		.bus_type	= DEVICE_BUS_PCI,
13456078a454SJean-Philippe Brucker 		.data		= &vdev->pci.hdr,
13466078a454SJean-Philippe Brucker 	};
13476078a454SJean-Philippe Brucker 
13486078a454SJean-Philippe Brucker 	ret = device__register(&vdev->dev_hdr);
13496078a454SJean-Philippe Brucker 	if (ret) {
13506078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to register VFIO device");
13516078a454SJean-Philippe Brucker 		return ret;
13526078a454SJean-Philippe Brucker 	}
13536078a454SJean-Philippe Brucker 
13546078a454SJean-Philippe Brucker 	ret = vfio_pci_configure_dev_irqs(kvm, vdev);
13556078a454SJean-Philippe Brucker 	if (ret) {
13566078a454SJean-Philippe Brucker 		vfio_dev_err(vdev, "failed to configure IRQs");
13576078a454SJean-Philippe Brucker 		return ret;
13586078a454SJean-Philippe Brucker 	}
13596078a454SJean-Philippe Brucker 
13606078a454SJean-Philippe Brucker 	return 0;
13616078a454SJean-Philippe Brucker }
13626078a454SJean-Philippe Brucker 
13636078a454SJean-Philippe Brucker void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev)
13646078a454SJean-Philippe Brucker {
13656078a454SJean-Philippe Brucker 	size_t i;
1366c9888d95SJean-Philippe Brucker 	struct vfio_pci_device *pdev = &vdev->pci;
13676078a454SJean-Philippe Brucker 
13686078a454SJean-Philippe Brucker 	for (i = 0; i < vdev->info.num_regions; i++)
13696078a454SJean-Philippe Brucker 		vfio_unmap_region(kvm, &vdev->regions[i]);
13706078a454SJean-Philippe Brucker 
13716078a454SJean-Philippe Brucker 	device__unregister(&vdev->dev_hdr);
1372c9888d95SJean-Philippe Brucker 
1373c9888d95SJean-Philippe Brucker 	free(pdev->msix.irq_set);
1374c9888d95SJean-Philippe Brucker 	free(pdev->msix.entries);
13758dd28afeSJean-Philippe Brucker 	free(pdev->msi.irq_set);
13768dd28afeSJean-Philippe Brucker 	free(pdev->msi.entries);
13776078a454SJean-Philippe Brucker }
1378