xref: /kvmtool/vfio/pci.c (revision 5a8e4f25dd7b32228ff214b5d5a68a27d96c9a6c)
1 #include "kvm/irq.h"
2 #include "kvm/kvm.h"
3 #include "kvm/kvm-cpu.h"
4 #include "kvm/vfio.h"
5 
6 #include <assert.h>
7 
8 #include <sys/ioctl.h>
9 #include <sys/eventfd.h>
10 #include <sys/resource.h>
11 #include <sys/time.h>
12 
15 /* Wrapper around UAPI vfio_irq_set */
16 union vfio_irq_eventfd {
17 	struct vfio_irq_set	irq;
18 	u8 buffer[sizeof(struct vfio_irq_set) + sizeof(int)];
19 };
20 
21 static void set_vfio_irq_eventfd_payload(union vfio_irq_eventfd *evfd, int fd)
22 {
23 	memcpy(&evfd->irq.data, &fd, sizeof(fd));
24 }
25 
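/*
 * Illustrative sketch (hypothetical helper, not part of this file): how the
 * union above is meant to be used. The kernel reads the eventfd from the
 * variable-size payload that follows struct vfio_irq_set, which is why the
 * union reserves sizeof(int) extra bytes after the header.
 */
#if 0
static int example_set_trigger(int device_fd, int event_fd)
{
	union vfio_irq_eventfd evfd = {
		.irq = {
			.argsz	= sizeof(evfd),
			.flags	= VFIO_IRQ_SET_DATA_EVENTFD |
				  VFIO_IRQ_SET_ACTION_TRIGGER,
			.index	= VFIO_PCI_MSI_IRQ_INDEX,
			.start	= 0,
			.count	= 1,
		},
	};

	set_vfio_irq_eventfd_payload(&evfd, event_fd);

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &evfd);
}
#endif
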
26 #define msi_is_enabled(state)		((state) & VFIO_PCI_MSI_STATE_ENABLED)
27 #define msi_is_masked(state)		((state) & VFIO_PCI_MSI_STATE_MASKED)
28 #define msi_is_empty(state)		((state) & VFIO_PCI_MSI_STATE_EMPTY)
29 
30 #define msi_update_state(state, val, bit)				\
31 	((state) = (val) ? (state) | (bit) : (state) & ~(bit))
32 #define msi_set_enabled(state, val)					\
33 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_ENABLED)
34 #define msi_set_masked(state, val)					\
35 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_MASKED)
36 #define msi_set_empty(state, val)					\
37 	msi_update_state(state, val, VFIO_PCI_MSI_STATE_EMPTY)
38 
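/*
 * For example, when the guest writes the MSI-X Message Control register with
 * both the Enable and Function Mask bits set, vfio_pci_msix_cap_write()
 * records:
 *
 *	msi_set_enabled(pdev->msix.virt_state, true);
 *	msi_set_masked(pdev->msix.virt_state, true);
 *
 * and vfio_pci_enable_msis() then defers per-vector updates until the guest
 * clears the mask (see the comments in that function).
 */
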
39 static void vfio_pci_disable_intx(struct kvm *kvm, struct vfio_device *vdev);
40 static int vfio_pci_enable_intx(struct kvm *kvm, struct vfio_device *vdev);
41 
42 static int vfio_pci_enable_msis(struct kvm *kvm, struct vfio_device *vdev,
43 				bool msix)
44 {
45 	size_t i;
46 	int ret = 0;
47 	int *eventfds;
48 	struct vfio_pci_device *pdev = &vdev->pci;
49 	struct vfio_pci_msi_common *msis = msix ? &pdev->msix : &pdev->msi;
50 	union vfio_irq_eventfd single = {
51 		.irq = {
52 			.argsz	= sizeof(single),
53 			.flags	= VFIO_IRQ_SET_DATA_EVENTFD |
54 				  VFIO_IRQ_SET_ACTION_TRIGGER,
55 			.index	= msis->info.index,
56 			.count	= 1,
57 		},
58 	};
59 
60 	if (!msi_is_enabled(msis->virt_state))
61 		return 0;
62 
63 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX)
64 		/*
65 		 * PCI (and VFIO) forbids enabling INTx, MSI or MSIX at the same
66 		 * time. Since INTx has to be enabled from the start (we don't
67 		 * have a reliable way to know when the guest starts using it),
68 		 * disable it now.
69 		 */
70 		vfio_pci_disable_intx(kvm, vdev);
71 
72 	eventfds = (void *)msis->irq_set + sizeof(struct vfio_irq_set);
73 
74 	/*
75 	 * Initial registration of the full range. This enables the physical
76 	 * MSI/MSI-X capability, which might have desired side effects. For
77 	 * instance when assigning virtio legacy devices, enabling the MSI
78 	 * capability modifies the config space layout!
79 	 *
80 	 * As an optimization, only update MSIs when the guest unmasks the
81 	 * capability. This greatly reduces the initialization time for a Linux
82 	 * guest with 2048+ MSIs: such a guest starts by enabling the MSI-X cap
83 	 * masked, then fills individual vectors, then unmasks the whole
84 	 * function. So we only do one VFIO ioctl when enabling for the first
85 	 * time, and then one when unmasking.
86 	 *
87 	 * phys_state is empty when it is enabled but no vector has been
88 	 * registered via SET_IRQS yet.
89 	 */
90 	if (!msi_is_enabled(msis->phys_state) ||
91 	    (!msi_is_masked(msis->virt_state) &&
92 	     msi_is_empty(msis->phys_state))) {
93 		bool empty = true;
94 
95 		for (i = 0; i < msis->nr_entries; i++) {
96 			eventfds[i] = msis->entries[i].gsi >= 0 ?
97 				      msis->entries[i].eventfd : -1;
98 
99 			if (eventfds[i] >= 0)
100 				empty = false;
101 		}
102 
103 		ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, msis->irq_set);
104 		if (ret < 0) {
105 			perror("VFIO_DEVICE_SET_IRQS(multi)");
106 			return ret;
107 		}
108 
109 		msi_set_enabled(msis->phys_state, true);
110 		msi_set_empty(msis->phys_state, empty);
111 
112 		return 0;
113 	}
114 
115 	if (msi_is_masked(msis->virt_state)) {
116 		/* TODO: if phys_state is not empty nor masked, mask all vectors */
117 		return 0;
118 	}
119 
120 	/* Update individual vectors to avoid breaking those in use */
121 	for (i = 0; i < msis->nr_entries; i++) {
122 		struct vfio_pci_msi_entry *entry = &msis->entries[i];
123 		int fd = entry->gsi >= 0 ? entry->eventfd : -1;
124 
125 		if (fd == eventfds[i])
126 			continue;
127 
128 		single.irq.start = i;
129 		set_vfio_irq_eventfd_payload(&single, fd);
130 
131 		ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &single);
132 		if (ret < 0) {
133 			perror("VFIO_DEVICE_SET_IRQS(single)");
134 			break;
135 		}
136 
137 		eventfds[i] = fd;
138 
139 		if (msi_is_empty(msis->phys_state) && fd >= 0)
140 			msi_set_empty(msis->phys_state, false);
141 	}
142 
143 	return ret;
144 }
145 
146 static int vfio_pci_disable_msis(struct kvm *kvm, struct vfio_device *vdev,
147 				 bool msix)
148 {
149 	int ret;
150 	struct vfio_pci_device *pdev = &vdev->pci;
151 	struct vfio_pci_msi_common *msis = msix ? &pdev->msix : &pdev->msi;
152 	struct vfio_irq_set irq_set = {
153 		.argsz	= sizeof(irq_set),
154 		.flags 	= VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
155 		.index 	= msis->info.index,
156 		.start 	= 0,
157 		.count	= 0,
158 	};
159 
160 	if (!msi_is_enabled(msis->phys_state))
161 		return 0;
162 
163 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
164 	if (ret < 0) {
165 		perror("VFIO_DEVICE_SET_IRQS(NONE)");
166 		return ret;
167 	}
168 
169 	msi_set_enabled(msis->phys_state, false);
170 	msi_set_empty(msis->phys_state, true);
171 
172 	/*
173 	 * When the guest disables MSI or MSI-X, it may be because its PCI
174 	 * driver detected an MSI delivery failure and wants to roll back to
175 	 * INTx mode. In that case, re-enable INTx here if the device supports
176 	 * it.
177 	 */
178 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX)
179 		ret = vfio_pci_enable_intx(kvm, vdev);
180 
181 	return ret >= 0 ? 0 : ret;
182 }
183 
184 static int vfio_pci_update_msi_entry(struct kvm *kvm, struct vfio_device *vdev,
185 				     struct vfio_pci_msi_entry *entry)
186 {
187 	int ret;
188 
189 	if (entry->eventfd < 0) {
190 		entry->eventfd = eventfd(0, 0);
191 		if (entry->eventfd < 0) {
192 			ret = -errno;
193 			vfio_dev_err(vdev, "cannot create eventfd");
194 			return ret;
195 		}
196 	}
197 
198 	/* Allocate IRQ if necessary */
199 	if (entry->gsi < 0) {
199 		ret = irq__add_msix_route(kvm, &entry->config.msg,
200 					  vdev->dev_hdr.dev_num << 3);
202 		if (ret < 0) {
203 			vfio_dev_err(vdev, "cannot create MSI-X route");
204 			return ret;
205 		}
206 		entry->gsi = ret;
207 	} else {
208 		irq__update_msix_route(kvm, entry->gsi, &entry->config.msg);
209 	}
210 
211 	/*
212 	 * MSI masking is unimplemented in VFIO, so we have to handle it by
213 	 * disabling/enabling the IRQ route instead. We do it on the KVM side
214 	 * rather than the VFIO side, because:
215 	 * - it is 8x faster,
216 	 * - it decouples the masking logic from the capability state,
217 	 * - in the masked state, after removing the irqfd route, we could
218 	 *   easily plug the eventfd into a local handler, in order to serve
219 	 *   Pending Bit reads to the guest.
220 	 *
221 	 * So entry->phys_state is masked when there is no active irqfd route.
222 	 */
223 	if (msi_is_masked(entry->virt_state) == msi_is_masked(entry->phys_state))
224 		return 0;
225 
226 	if (msi_is_masked(entry->phys_state)) {
227 		ret = irq__add_irqfd(kvm, entry->gsi, entry->eventfd, -1);
228 		if (ret < 0) {
229 			vfio_dev_err(vdev, "cannot setup irqfd");
230 			return ret;
231 		}
232 	} else {
233 		irq__del_irqfd(kvm, entry->gsi, entry->eventfd);
234 	}
235 
236 	msi_set_masked(entry->phys_state, msi_is_masked(entry->virt_state));
237 
238 	return 0;
239 }
240 
241 static void vfio_pci_msix_pba_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
242 				     u32 len, u8 is_write, void *ptr)
243 {
244 	struct vfio_pci_device *pdev = ptr;
245 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
246 	u64 offset = addr - pba->guest_phys_addr;
247 	struct vfio_device *vdev = container_of(pdev, struct vfio_device, pci);
248 
249 	if (is_write)
250 		return;
251 
252 	/*
253 	 * TODO: emulate the PBA. Masking is emulated in KVM, so the hardware
254 	 * PBA stays clear and reading it is useless. Linux doesn't use it.
255 	 */
256 	if (pread(vdev->fd, data, len, pba->offset + offset) != (ssize_t)len)
257 		vfio_dev_err(vdev, "cannot access MSIX PBA");
258 }
259 
260 static void vfio_pci_msix_table_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
261 				       u32 len, u8 is_write, void *ptr)
262 {
263 	struct kvm *kvm = vcpu->kvm;
264 	struct vfio_pci_msi_entry *entry;
265 	struct vfio_pci_device *pdev = ptr;
266 	struct vfio_device *vdev = container_of(pdev, struct vfio_device, pci);
267 
268 	u64 offset = addr - pdev->msix_table.guest_phys_addr;
269 
270 	size_t vector = offset / PCI_MSIX_ENTRY_SIZE;
271 	off_t field = offset % PCI_MSIX_ENTRY_SIZE;
272 
273 	/*
274 	 * The PCI spec says that software must use aligned 4- or 8-byte
275 	 * accesses for the MSI-X table.
276 	 */
277 	if ((len != 4 && len != 8) || addr & (len - 1)) {
278 		vfio_dev_warn(vdev, "invalid MSI-X table access");
279 		return;
280 	}
281 
	if (vector >= pdev->msix.nr_entries) {
		vfio_dev_warn(vdev, "MSI-X vector %zu out of range", vector);
		return;
	}

	entry = &pdev->msix.entries[vector];
283 
284 	mutex_lock(&pdev->msix.mutex);
285 
286 	if (!is_write) {
287 		memcpy(data, (void *)&entry->config + field, len);
288 		goto out_unlock;
289 	}
290 
291 	memcpy((void *)&entry->config + field, data, len);
292 
293 	/*
294 	 * Check if access touched the vector control register, which is at the
295 	 * end of the MSI-X entry.
296 	 */
297 	if (field + len <= PCI_MSIX_ENTRY_VECTOR_CTRL)
298 		goto out_unlock;
299 
300 	msi_set_masked(entry->virt_state, entry->config.ctrl &
301 		       PCI_MSIX_ENTRY_CTRL_MASKBIT);
302 
303 	if (vfio_pci_update_msi_entry(kvm, vdev, entry) < 0)
304 		/* Not much we can do here. */
305 		vfio_dev_err(vdev, "failed to configure MSIX vector %zu", vector);
306 
307 	/* Update the physical capability if necessary */
308 	if (vfio_pci_enable_msis(kvm, vdev, true))
309 		vfio_dev_err(vdev, "cannot enable MSIX");
310 
311 out_unlock:
312 	mutex_unlock(&pdev->msix.mutex);
313 }
314 
315 static void vfio_pci_msix_cap_write(struct kvm *kvm,
316 				    struct vfio_device *vdev, u8 off,
317 				    void *data, int sz)
318 {
319 	struct vfio_pci_device *pdev = &vdev->pci;
320 	off_t enable_pos = PCI_MSIX_FLAGS + 1;
321 	bool enable;
322 	u16 flags;
323 
324 	off -= pdev->msix.pos;
325 
326 	/* Check if access intersects with the MSI-X Enable bit */
327 	if (off > enable_pos || off + sz <= enable_pos)
328 		return;
329 
330 	/* Read byte that contains the Enable bit */
331 	flags = *(u8 *)(data + enable_pos - off) << 8;
332 
333 	mutex_lock(&pdev->msix.mutex);
334 
335 	msi_set_masked(pdev->msix.virt_state, flags & PCI_MSIX_FLAGS_MASKALL);
336 	enable = flags & PCI_MSIX_FLAGS_ENABLE;
337 	msi_set_enabled(pdev->msix.virt_state, enable);
338 
339 	if (enable && vfio_pci_enable_msis(kvm, vdev, true))
340 		vfio_dev_err(vdev, "cannot enable MSIX");
341 	else if (!enable && vfio_pci_disable_msis(kvm, vdev, true))
342 		vfio_dev_err(vdev, "cannot disable MSIX");
343 
344 	mutex_unlock(&pdev->msix.mutex);
345 }
346 
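/*
 * Worked example for the enable-bit intersection check above (capability
 * position assumed for illustration): with the MSI-X capability at 0x70, the
 * Message Control register sits at 0x72 and the byte holding the Enable bit
 * at 0x73, so after the rebase enable_pos = PCI_MSIX_FLAGS + 1 = 3. A 2-byte
 * write to 0x72 gives off = 2 and off + sz = 4, which covers enable_pos and
 * is handled; a 2-byte write to 0x70 gives off + sz = 2 <= enable_pos and is
 * ignored.
 */
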
347 static int vfio_pci_msi_vector_write(struct kvm *kvm, struct vfio_device *vdev,
348 				     u8 off, u8 *data, u32 sz)
349 {
350 	size_t i;
351 	u32 mask = 0;
352 	size_t mask_pos, start, limit;
353 	struct vfio_pci_msi_entry *entry;
354 	struct vfio_pci_device *pdev = &vdev->pci;
355 	struct msi_cap_64 *msi_cap_64 = PCI_CAP(&pdev->hdr, pdev->msi.pos);
356 
357 	if (!(msi_cap_64->ctrl & PCI_MSI_FLAGS_MASKBIT))
358 		return 0;
359 
360 	if (msi_cap_64->ctrl & PCI_MSI_FLAGS_64BIT)
361 		mask_pos = PCI_MSI_MASK_64;
362 	else
363 		mask_pos = PCI_MSI_MASK_32;
364 
365 	if (off >= mask_pos + 4 || off + sz <= mask_pos)
366 		return 0;
367 
368 	/* Set mask to current state */
369 	for (i = 0; i < pdev->msi.nr_entries; i++) {
370 		entry = &pdev->msi.entries[i];
371 		mask |= !!msi_is_masked(entry->virt_state) << i;
372 	}
373 
374 	/* Update the mask over the intersection of the access and the register */
375 	start = max_t(size_t, off, mask_pos);
376 	limit = min_t(size_t, off + sz, mask_pos + 4);
377 
378 	memcpy((void *)&mask + start - mask_pos, data + start - off,
379 	       limit - start);
380 
381 	/* Update states if necessary */
382 	for (i = 0; i < pdev->msi.nr_entries; i++) {
383 		bool masked = mask & (1 << i);
384 
385 		entry = &pdev->msi.entries[i];
386 		if (masked != msi_is_masked(entry->virt_state)) {
387 			msi_set_masked(entry->virt_state, masked);
388 			vfio_pci_update_msi_entry(kvm, vdev, entry);
389 		}
390 	}
391 
392 	return 1;
393 }
394 
395 static void vfio_pci_msi_cap_write(struct kvm *kvm, struct vfio_device *vdev,
396 				   u8 off, u8 *data, u32 sz)
397 {
398 	u8 ctrl;
399 	struct msi_msg msg;
400 	size_t i, nr_vectors;
401 	struct vfio_pci_msi_entry *entry;
402 	struct vfio_pci_device *pdev = &vdev->pci;
403 	struct msi_cap_64 *msi_cap_64 = PCI_CAP(&pdev->hdr, pdev->msi.pos);
404 
405 	off -= pdev->msi.pos;
406 
407 	mutex_lock(&pdev->msi.mutex);
408 
409 	/* Check if the guest is trying to update mask bits */
410 	if (vfio_pci_msi_vector_write(kvm, vdev, off, data, sz))
411 		goto out_unlock;
412 
413 	/* Only modify routes when the guest pokes the enable bit */
414 	if (off > PCI_MSI_FLAGS || off + sz <= PCI_MSI_FLAGS)
415 		goto out_unlock;
416 
417 	ctrl = *(u8 *)(data + PCI_MSI_FLAGS - off);
418 
419 	msi_set_enabled(pdev->msi.virt_state, ctrl & PCI_MSI_FLAGS_ENABLE);
420 
421 	if (!msi_is_enabled(pdev->msi.virt_state)) {
422 		vfio_pci_disable_msis(kvm, vdev, false);
423 		goto out_unlock;
424 	}
425 
426 	/* Create routes for the requested vectors */
427 	nr_vectors = 1 << ((ctrl & PCI_MSI_FLAGS_QSIZE) >> 4);
428 
429 	msg.address_lo = msi_cap_64->address_lo;
430 	if (msi_cap_64->ctrl & PCI_MSI_FLAGS_64BIT) {
431 		msg.address_hi = msi_cap_64->address_hi;
432 		msg.data = msi_cap_64->data;
433 	} else {
434 		struct msi_cap_32 *msi_cap_32 = (void *)msi_cap_64;
435 		msg.address_hi = 0;
436 		msg.data = msi_cap_32->data;
437 	}
438 
439 	for (i = 0; i < nr_vectors; i++) {
440 		entry = &pdev->msi.entries[i];
441 
442 		/*
443 		 * Set the MSI data value as required by the PCI local
444 		 * bus specifications, MSI capability, "Message Data".
445 		 */
446 		msg.data &= ~(nr_vectors - 1);
447 		msg.data |= i;
448 
449 		entry->config.msg = msg;
450 		vfio_pci_update_msi_entry(kvm, vdev, entry);
451 	}
452 
453 	/* Update the physical capability if necessary */
454 	if (vfio_pci_enable_msis(kvm, vdev, false))
455 		vfio_dev_err(vdev, "cannot enable MSI");
456 
457 out_unlock:
458 	mutex_unlock(&pdev->msi.mutex);
459 }
460 
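/*
 * Example of the vector stamping above (message data value assumed for
 * illustration): if the guest enables 4 vectors with a Message Data value of
 * 0x4080, the entries are programmed with data 0x4080, 0x4081, 0x4082 and
 * 0x4083, following the MSI "Message Data" alignment rules.
 */
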
461 static int vfio_pci_bar_activate(struct kvm *kvm,
462 				 struct pci_device_header *pci_hdr,
463 				 int bar_num, void *data)
464 {
465 	struct vfio_device *vdev = data;
466 	struct vfio_pci_device *pdev = &vdev->pci;
467 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
468 	struct vfio_pci_msix_table *table = &pdev->msix_table;
469 	struct vfio_region *region;
470 	bool has_msix;
471 	int ret;
472 
473 	assert((u32)bar_num < vdev->info.num_regions);
474 
475 	region = &vdev->regions[bar_num];
476 	has_msix = pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX;
477 
478 	if (has_msix && (u32)bar_num == table->bar) {
479 		ret = kvm__register_mmio(kvm, table->guest_phys_addr,
480 					 table->size, false,
481 					 vfio_pci_msix_table_access, pdev);
482 		/*
483 		 * The MSIX table and the PBA structure can share the same BAR,
484 		 * but for convenience we register different regions for mmio
485 		 * emulation. Make sure we update both if they share the same
486 		 * BAR.
487 		 */
488 		if (ret < 0 || table->bar != pba->bar)
489 			goto out;
490 	}
491 
492 	if (has_msix && (u32)bar_num == pba->bar) {
493 		ret = kvm__register_mmio(kvm, pba->guest_phys_addr,
494 					 pba->size, false,
495 					 vfio_pci_msix_pba_access, pdev);
496 		goto out;
497 	}
498 
499 	ret = vfio_map_region(kvm, vdev, region);
500 out:
501 	return ret;
502 }
503 
504 static int vfio_pci_bar_deactivate(struct kvm *kvm,
505 				   struct pci_device_header *pci_hdr,
506 				   int bar_num, void *data)
507 {
508 	struct vfio_device *vdev = data;
509 	struct vfio_pci_device *pdev = &vdev->pci;
510 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
511 	struct vfio_pci_msix_table *table = &pdev->msix_table;
512 	struct vfio_region *region;
513 	bool has_msix, success;
514 	int ret;
515 
516 	assert((u32)bar_num < vdev->info.num_regions);
517 
518 	region = &vdev->regions[bar_num];
519 	has_msix = pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX;
520 
521 	if (has_msix && (u32)bar_num == table->bar) {
522 		success = kvm__deregister_mmio(kvm, table->guest_phys_addr);
523 		/* kvm__deregister_mmio fails when the region is not found. */
524 		ret = (success ? 0 : -ENOENT);
525 		/* See vfio_pci_bar_activate(). */
526 		if (ret < 0 || table->bar != pba->bar)
527 			goto out;
528 	}
529 
530 	if (has_msix && (u32)bar_num == pba->bar) {
531 		success = kvm__deregister_mmio(kvm, pba->guest_phys_addr);
532 		ret = (success ? 0 : -ENOENT);
533 		goto out;
534 	}
535 
536 	vfio_unmap_region(kvm, region);
537 	ret = 0;
538 
539 out:
540 	return ret;
541 }
542 
543 static void vfio_pci_cfg_read(struct kvm *kvm, struct pci_device_header *pci_hdr,
544 			      u8 offset, void *data, int sz)
545 {
546 	struct vfio_region_info *info;
547 	struct vfio_pci_device *pdev;
548 	struct vfio_device *vdev;
549 	char base[sz];
550 
551 	pdev = container_of(pci_hdr, struct vfio_pci_device, hdr);
552 	vdev = container_of(pdev, struct vfio_device, pci);
553 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
554 
555 	/* Dummy read in case of side effects; the guest reads from the emulated header */
556 	if (pread(vdev->fd, base, sz, info->offset + offset) != sz)
557 		vfio_dev_warn(vdev, "failed to read %d bytes from Configuration Space at 0x%x",
558 			      sz, offset);
559 }
560 
561 static void vfio_pci_cfg_write(struct kvm *kvm, struct pci_device_header *pci_hdr,
562 			       u8 offset, void *data, int sz)
563 {
564 	struct vfio_region_info *info;
565 	struct vfio_pci_device *pdev;
566 	struct vfio_device *vdev;
567 	u32 tmp;
568 
569 	/* Make sure a larger size will not overrun tmp on the stack. */
570 	assert(sz <= 4);
571 
572 	if (offset == PCI_ROM_ADDRESS)
573 		return;
574 
575 	pdev = container_of(pci_hdr, struct vfio_pci_device, hdr);
576 	vdev = container_of(pdev, struct vfio_device, pci);
577 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
578 
579 	if (pwrite(vdev->fd, data, sz, info->offset + offset) != sz)
580 		vfio_dev_warn(vdev, "Failed to write %d bytes to Configuration Space at 0x%x",
581 			      sz, offset);
582 
583 	/* Handle MSI write now, since it might update the hardware capability */
584 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX)
585 		vfio_pci_msix_cap_write(kvm, vdev, offset, data, sz);
586 
587 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI)
588 		vfio_pci_msi_cap_write(kvm, vdev, offset, data, sz);
589 
590 	if (pread(vdev->fd, &tmp, sz, info->offset + offset) != sz)
591 		vfio_dev_warn(vdev, "Failed to read %d bytes from Configuration Space at 0x%x",
592 			      sz, offset);
593 }
594 
595 static ssize_t vfio_pci_msi_cap_size(struct msi_cap_64 *cap_hdr)
596 {
597 	size_t size = 10;
598 
599 	if (cap_hdr->ctrl & PCI_MSI_FLAGS_64BIT)
600 		size += 4;
601 	if (cap_hdr->ctrl & PCI_MSI_FLAGS_MASKBIT)
602 		size += 10;
603 
604 	return size;
605 }
606 
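/*
 * For reference, the sizes computed above match the MSI capability layouts
 * defined by the PCI spec:
 *
 *	32-bit:               ID, next, ctrl, addr_lo, data          = 10
 *	64-bit:               + addr_hi                              = 14
 *	32-bit + vector mask: + pad, mask, pending                   = 20
 *	64-bit + vector mask: + addr_hi, pad, mask, pending          = 24
 */
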
607 static ssize_t vfio_pci_cap_size(struct pci_cap_hdr *cap_hdr)
608 {
609 	switch (cap_hdr->type) {
610 	case PCI_CAP_ID_MSIX:
611 		return PCI_CAP_MSIX_SIZEOF;
612 	case PCI_CAP_ID_MSI:
613 		return vfio_pci_msi_cap_size((void *)cap_hdr);
614 	default:
615 		pr_err("unknown PCI capability 0x%x", cap_hdr->type);
616 		return 0;
617 	}
618 }
619 
620 static int vfio_pci_add_cap(struct vfio_device *vdev, u8 *virt_hdr,
621 			    struct pci_cap_hdr *cap, off_t pos)
622 {
623 	struct pci_cap_hdr *last;
624 	struct pci_device_header *hdr = &vdev->pci.hdr;
625 
626 	cap->next = 0;
627 
628 	if (!hdr->capabilities) {
629 		hdr->capabilities = pos;
630 		hdr->status |= PCI_STATUS_CAP_LIST;
631 	} else {
632 		last = PCI_CAP(virt_hdr, hdr->capabilities);
633 
634 		while (last->next)
635 			last = PCI_CAP(virt_hdr, last->next);
636 
637 		last->next = pos;
638 	}
639 
640 	memcpy(virt_hdr + pos, cap, vfio_pci_cap_size(cap));
641 
642 	return 0;
643 }
644 
645 static int vfio_pci_parse_caps(struct vfio_device *vdev)
646 {
647 	int ret;
648 	size_t size;
649 	u8 pos, next;
650 	struct pci_cap_hdr *cap;
651 	u8 virt_hdr[PCI_DEV_CFG_SIZE];
652 	struct vfio_pci_device *pdev = &vdev->pci;
653 
654 	if (!(pdev->hdr.status & PCI_STATUS_CAP_LIST))
655 		return 0;
656 
657 	memset(virt_hdr, 0, PCI_DEV_CFG_SIZE);
658 
659 	pos = pdev->hdr.capabilities & ~3;
660 
661 	pdev->hdr.status &= ~PCI_STATUS_CAP_LIST;
662 	pdev->hdr.capabilities = 0;
663 
664 	for (; pos; pos = next) {
665 		cap = PCI_CAP(&pdev->hdr, pos);
666 		next = cap->next;
667 
668 		switch (cap->type) {
669 		case PCI_CAP_ID_MSIX:
670 			ret = vfio_pci_add_cap(vdev, virt_hdr, cap, pos);
671 			if (ret)
672 				return ret;
673 
674 			pdev->msix.pos = pos;
675 			pdev->irq_modes |= VFIO_PCI_IRQ_MODE_MSIX;
676 			break;
677 		case PCI_CAP_ID_MSI:
678 			ret = vfio_pci_add_cap(vdev, virt_hdr, cap, pos);
679 			if (ret)
680 				return ret;
681 
682 			pdev->msi.pos = pos;
683 			pdev->irq_modes |= VFIO_PCI_IRQ_MODE_MSI;
684 			break;
685 		}
686 	}
687 
688 	/* Wipe remaining capabilities */
689 	pos = PCI_STD_HEADER_SIZEOF;
690 	size = PCI_DEV_CFG_SIZE - PCI_STD_HEADER_SIZEOF;
691 	memcpy((void *)&pdev->hdr + pos, virt_hdr + pos, size);
692 
693 	return 0;
694 }
695 
696 static int vfio_pci_parse_cfg_space(struct vfio_device *vdev)
697 {
698 	ssize_t sz = PCI_DEV_CFG_SIZE;
699 	struct vfio_region_info *info;
700 	struct vfio_pci_device *pdev = &vdev->pci;
701 
702 	if (vdev->info.num_regions <= VFIO_PCI_CONFIG_REGION_INDEX) {
703 		vfio_dev_err(vdev, "Config Space not found");
704 		return -ENODEV;
705 	}
706 
707 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
708 	*info = (struct vfio_region_info) {
709 			.argsz = sizeof(*info),
710 			.index = VFIO_PCI_CONFIG_REGION_INDEX,
711 	};
712 
713 	ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, info);
714 	if (!info->size) {
715 		vfio_dev_err(vdev, "Config Space has size zero?!");
716 		return -EINVAL;
717 	}
718 
719 	/* Read standard headers and capabilities */
720 	if (pread(vdev->fd, &pdev->hdr, sz, info->offset) != sz) {
721 		vfio_dev_err(vdev, "failed to read %zd bytes of Config Space", sz);
722 		return -EIO;
723 	}
724 
725 	/* Strip bit 7, which indicates a multifunction device */
726 	pdev->hdr.header_type &= 0x7f;
727 
728 	if (pdev->hdr.header_type != PCI_HEADER_TYPE_NORMAL) {
729 		vfio_dev_err(vdev, "unsupported header type %u",
730 			     pdev->hdr.header_type);
731 		return -EOPNOTSUPP;
732 	}
733 
734 	if (pdev->hdr.irq_pin)
735 		pdev->irq_modes |= VFIO_PCI_IRQ_MODE_INTX;
736 
737 	vfio_pci_parse_caps(vdev);
738 
739 	return 0;
740 }
741 
742 static int vfio_pci_fixup_cfg_space(struct vfio_device *vdev)
743 {
744 	int i;
745 	u64 base;
746 	ssize_t hdr_sz;
747 	struct msix_cap *msix;
748 	struct vfio_region_info *info;
749 	struct vfio_pci_device *pdev = &vdev->pci;
750 	struct vfio_region *region;
751 
752 	/* Initialise the BARs */
753 	for (i = VFIO_PCI_BAR0_REGION_INDEX; i <= VFIO_PCI_BAR5_REGION_INDEX; ++i) {
754 		if ((u32)i == vdev->info.num_regions)
755 			break;
756 
757 		region = &vdev->regions[i];
758 		/* Construct a fake reg to match what we've mapped. */
759 		if (region->is_ioport) {
760 			base = (region->port_base & PCI_BASE_ADDRESS_IO_MASK) |
761 				PCI_BASE_ADDRESS_SPACE_IO;
762 		} else {
763 			base = (region->guest_phys_addr &
764 				PCI_BASE_ADDRESS_MEM_MASK) |
765 				PCI_BASE_ADDRESS_SPACE_MEMORY;
766 		}
767 
768 		pdev->hdr.bar[i] = base;
769 
770 		if (!base)
771 			continue;
772 
773 		pdev->hdr.bar_size[i] = region->info.size;
774 	}
775 
776 	/* I really can't be bothered to support cardbus. */
777 	pdev->hdr.card_bus = 0;
778 
779 	/*
780 	 * Nuke the expansion ROM for now. If we want to do this properly,
781 	 * we need to save its size somewhere and map into the guest.
782 	 */
783 	pdev->hdr.exp_rom_bar = 0;
784 
785 	/* Plumb in our fake MSI-X capability, if we have it. */
786 	msix = pci_find_cap(&pdev->hdr, PCI_CAP_ID_MSIX);
787 	if (msix) {
788 		/* Add a shortcut to the PBA region for the MMIO handler */
789 		int pba_index = VFIO_PCI_BAR0_REGION_INDEX + pdev->msix_pba.bar;
790 		pdev->msix_pba.offset = vdev->regions[pba_index].info.offset +
791 					(msix->pba_offset & PCI_MSIX_PBA_OFFSET);
792 
793 		/* Tidy up the capability */
794 		msix->table_offset &= PCI_MSIX_TABLE_BIR;
795 		msix->pba_offset &= PCI_MSIX_PBA_BIR;
796 		if (pdev->msix_table.bar == pdev->msix_pba.bar)
797 			msix->pba_offset |= pdev->msix_table.size &
798 					    PCI_MSIX_PBA_OFFSET;
799 	}
800 
801 	/* Install our fake Configuration Space */
802 	info = &vdev->regions[VFIO_PCI_CONFIG_REGION_INDEX].info;
803 	hdr_sz = PCI_DEV_CFG_SIZE;
804 	if (pwrite(vdev->fd, &pdev->hdr, hdr_sz, info->offset) != hdr_sz) {
805 		vfio_dev_err(vdev, "failed to write %zd bytes to Config Space",
806 			     hdr_sz);
807 		return -EIO;
808 	}
809 
810 	/* Register callbacks for cfg accesses */
811 	pdev->hdr.cfg_ops = (struct pci_config_operations) {
812 		.read	= vfio_pci_cfg_read,
813 		.write	= vfio_pci_cfg_write,
814 	};
815 
816 	pdev->hdr.irq_type = IRQ_TYPE_LEVEL_HIGH;
817 
818 	return 0;
819 }
820 
821 static int vfio_pci_get_region_info(struct vfio_device *vdev, u32 index,
822 				    struct vfio_region_info *info)
823 {
824 	int ret;
825 
826 	*info = (struct vfio_region_info) {
827 		.argsz = sizeof(*info),
828 		.index = index,
829 	};
830 
831 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, info);
832 	if (ret) {
833 		ret = -errno;
834 		vfio_dev_err(vdev, "cannot get info for BAR %u", index);
835 		return ret;
836 	}
837 
838 	if (info->size && !is_power_of_two(info->size)) {
839 		vfio_dev_err(vdev, "region is not power of two: 0x%llx",
840 				info->size);
841 		return -EINVAL;
842 	}
843 
844 	return 0;
845 }
846 
847 static int vfio_pci_create_msix_table(struct kvm *kvm, struct vfio_device *vdev)
848 {
849 	int ret;
850 	size_t i;
851 	size_t map_size;
852 	size_t nr_entries;
853 	struct vfio_pci_msi_entry *entries;
854 	struct vfio_pci_device *pdev = &vdev->pci;
855 	struct vfio_pci_msix_pba *pba = &pdev->msix_pba;
856 	struct vfio_pci_msix_table *table = &pdev->msix_table;
857 	struct msix_cap *msix = PCI_CAP(&pdev->hdr, pdev->msix.pos);
858 	struct vfio_region_info info;
859 
860 	table->bar = msix->table_offset & PCI_MSIX_TABLE_BIR;
861 	pba->bar = msix->pba_offset & PCI_MSIX_PBA_BIR;
862 
863 	/*
864 	 * KVM needs memory regions to be a multiple of PAGE_SIZE and aligned to it.
865 	 */
866 	nr_entries = (msix->ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
867 	table->size = ALIGN(nr_entries * PCI_MSIX_ENTRY_SIZE, PAGE_SIZE);
868 	pba->size = ALIGN(DIV_ROUND_UP(nr_entries, 64), PAGE_SIZE);
869 
870 	entries = calloc(nr_entries, sizeof(struct vfio_pci_msi_entry));
871 	if (!entries)
872 		return -ENOMEM;
873 
874 	for (i = 0; i < nr_entries; i++)
875 		entries[i].config.ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
876 
	ret = vfio_pci_get_region_info(vdev, table->bar, &info);
	if (ret)
		goto out_free;
	if (!info.size) {
		ret = -EINVAL;
		goto out_free;
	}
	map_size = info.size;

	if (table->bar != pba->bar) {
		ret = vfio_pci_get_region_info(vdev, pba->bar, &info);
		if (ret)
			goto out_free;
		if (!info.size) {
			ret = -EINVAL;
			goto out_free;
		}
		map_size += info.size;
	}
892 
893 	/*
894 	 * To ease MSI-X cap configuration when the table and the PBA share the
895 	 * same BAR, collapse them into a single mapping. The size of the BAR
896 	 * regions must be a power of two.
897 	 */
898 	map_size = ALIGN(map_size, PAGE_SIZE);
899 	table->guest_phys_addr = pci_get_mmio_block(map_size);
900 	if (!table->guest_phys_addr) {
901 		pr_err("cannot allocate MMIO space");
902 		ret = -ENOMEM;
903 		goto out_free;
904 	}
905 
906 	/*
907 	 * We could map the physical PBA directly into the guest, but it's
908 	 * likely smaller than a page, and we can only hand full pages to the
909 	 * guest. Even though the PCI spec disallows sharing a page used for
910 	 * MSI-X with any other resource, it allows sharing the same page
911 	 * between the MSI-X table and the PBA. For the sake of isolation,
912 	 * create a virtual PBA.
913 	 */
914 	pba->guest_phys_addr = table->guest_phys_addr + table->size;
915 
916 	pdev->msix.entries = entries;
917 	pdev->msix.nr_entries = nr_entries;
918 
919 	return 0;
920 
921 out_free:
922 	free(entries);
923 
924 	return ret;
925 }
926 
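/*
 * Sizing example for the function above (assuming 4 KiB pages): a device
 * with 2048 MSI-X vectors has a 2048 * 16 = 32 KiB table, so table->size is
 * 32 KiB, and a 2048-bit pending array, so pba->size rounds up to a single
 * page. The virtual PBA is then placed at table->guest_phys_addr + 32 KiB.
 */
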
927 static int vfio_pci_create_msi_cap(struct kvm *kvm, struct vfio_pci_device *pdev)
928 {
929 	struct msi_cap_64 *cap = PCI_CAP(&pdev->hdr, pdev->msi.pos);
930 
931 	pdev->msi.nr_entries = 1 << ((cap->ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
932 	pdev->msi.entries = calloc(pdev->msi.nr_entries,
933 				   sizeof(struct vfio_pci_msi_entry));
934 	if (!pdev->msi.entries)
935 		return -ENOMEM;
936 
937 	return 0;
938 }
939 
940 static int vfio_pci_configure_bar(struct kvm *kvm, struct vfio_device *vdev,
941 				  size_t nr)
942 {
943 	int ret;
944 	u32 bar;
945 	size_t map_size;
946 	struct vfio_pci_device *pdev = &vdev->pci;
947 	struct vfio_region *region;
948 
949 	if (nr >= vdev->info.num_regions)
950 		return 0;
951 
952 	region = &vdev->regions[nr];
953 	bar = pdev->hdr.bar[nr];
954 
955 	region->vdev = vdev;
956 	region->is_ioport = !!(bar & PCI_BASE_ADDRESS_SPACE_IO);
957 
958 	ret = vfio_pci_get_region_info(vdev, nr, &region->info);
959 	if (ret)
960 		return ret;
961 
962 	/* Ignore invalid or unimplemented regions */
963 	if (!region->info.size)
964 		return 0;
965 
966 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
967 		/* Trap and emulate MSI-X table */
968 		if (nr == pdev->msix_table.bar) {
969 			region->guest_phys_addr = pdev->msix_table.guest_phys_addr;
970 			return 0;
971 		} else if (nr == pdev->msix_pba.bar) {
972 			region->guest_phys_addr = pdev->msix_pba.guest_phys_addr;
973 			return 0;
974 		}
975 	}
976 
977 	if (region->is_ioport) {
978 		region->port_base = pci_get_io_port_block(region->info.size);
979 	} else {
980 		/* Grab some MMIO space in the guest */
981 		map_size = ALIGN(region->info.size, PAGE_SIZE);
982 		region->guest_phys_addr = pci_get_mmio_block(map_size);
983 	}
984 
985 	return 0;
986 }
987 
988 static int vfio_pci_configure_dev_regions(struct kvm *kvm,
989 					  struct vfio_device *vdev)
990 {
991 	int ret;
992 	u32 bar;
993 	size_t i;
994 	bool is_64bit = false;
995 	struct vfio_pci_device *pdev = &vdev->pci;
996 
997 	ret = vfio_pci_parse_cfg_space(vdev);
998 	if (ret)
999 		return ret;
1000 
1001 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
1002 		ret = vfio_pci_create_msix_table(kvm, vdev);
1003 		if (ret)
1004 			return ret;
1005 	}
1006 
1007 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI) {
1008 		ret = vfio_pci_create_msi_cap(kvm, pdev);
1009 		if (ret)
1010 			return ret;
1011 	}
1012 
1013 	for (i = VFIO_PCI_BAR0_REGION_INDEX; i <= VFIO_PCI_BAR5_REGION_INDEX; ++i) {
1014 		/* Ignore top half of 64-bit BAR */
1015 		if (is_64bit) {
1016 			is_64bit = false;
1017 			continue;
1018 		}
1019 
1020 		ret = vfio_pci_configure_bar(kvm, vdev, i);
1021 		if (ret)
1022 			return ret;
1023 
1024 		bar = pdev->hdr.bar[i];
1025 		is_64bit = (bar & PCI_BASE_ADDRESS_SPACE) ==
1026 			   PCI_BASE_ADDRESS_SPACE_MEMORY &&
1027 			   bar & PCI_BASE_ADDRESS_MEM_TYPE_64;
1028 	}
1029 
1030 	/* We've configured the BARs, fake up a Configuration Space */
1031 	ret = vfio_pci_fixup_cfg_space(vdev);
1032 	if (ret)
1033 		return ret;
1034 
1035 	return pci__register_bar_regions(kvm, &pdev->hdr, vfio_pci_bar_activate,
1036 					 vfio_pci_bar_deactivate, vdev);
1037 }
1038 
1039 /*
1040  * Attempt to update the FD limit if opening an eventfd for each IRQ vector
1041  * would hit it, which is likely to happen when a device uses 2048 MSIs.
1042  */
1043 static int vfio_pci_reserve_irq_fds(size_t num)
1044 {
1045 	/*
1046 	 * I counted around 27 fds under normal load. Let's add 100 for good
1047 	 * measure.
1048 	 */
1049 	static size_t needed = 128;
1050 	struct rlimit fd_limit, new_limit;
1051 
1052 	needed += num;
1053 
1054 	if (getrlimit(RLIMIT_NOFILE, &fd_limit)) {
1055 		perror("getrlimit(RLIMIT_NOFILE)");
1056 		return 0;
1057 	}
1058 
1059 	if (fd_limit.rlim_cur >= needed)
1060 		return 0;
1061 
1062 	new_limit.rlim_cur = needed;
1063 
1064 	if (fd_limit.rlim_max < needed)
1065 		/* Try to bump hard limit (root only) */
1066 		new_limit.rlim_max = needed;
1067 	else
1068 		new_limit.rlim_max = fd_limit.rlim_max;
1069 
1070 	if (setrlimit(RLIMIT_NOFILE, &new_limit)) {
1071 		perror("setrlimit(RLIMIT_NOFILE)");
1072 		pr_warning("not enough FDs for full MSI-X support (estimated need: %zu)",
1073 			   (size_t)(needed - fd_limit.rlim_cur));
1074 	}
1075 
1076 	return 0;
1077 }
1078 
1079 static int vfio_pci_init_msis(struct kvm *kvm, struct vfio_device *vdev,
1080 			     struct vfio_pci_msi_common *msis)
1081 {
1082 	int ret;
1083 	size_t i;
1084 	int *eventfds;
1085 	size_t irq_set_size;
1086 	struct vfio_pci_msi_entry *entry;
1087 	size_t nr_entries = msis->nr_entries;
1088 
1089 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &msis->info);
1090 	if (ret || msis->info.count == 0) {
1091 		vfio_dev_err(vdev, "no MSI reported by VFIO");
1092 		return -ENODEV;
1093 	}
1094 
1095 	if (!(msis->info.flags & VFIO_IRQ_INFO_EVENTFD)) {
1096 		vfio_dev_err(vdev, "interrupt not EVENTFD capable");
1097 		return -EINVAL;
1098 	}
1099 
1100 	if (msis->info.count != nr_entries) {
1101 		vfio_dev_err(vdev, "invalid number of MSIs reported by VFIO");
1102 		return -EINVAL;
1103 	}
1104 
1105 	mutex_init(&msis->mutex);
1106 
1107 	vfio_pci_reserve_irq_fds(nr_entries);
1108 
1109 	irq_set_size = sizeof(struct vfio_irq_set) + nr_entries * sizeof(int);
1110 	msis->irq_set = malloc(irq_set_size);
1111 	if (!msis->irq_set)
1112 		return -ENOMEM;
1113 
1114 	*msis->irq_set = (struct vfio_irq_set) {
1115 		.argsz	= irq_set_size,
1116 		.flags 	= VFIO_IRQ_SET_DATA_EVENTFD |
1117 			  VFIO_IRQ_SET_ACTION_TRIGGER,
1118 		.index 	= msis->info.index,
1119 		.start 	= 0,
1120 		.count 	= nr_entries,
1121 	};
1122 
1123 	eventfds = (void *)msis->irq_set + sizeof(struct vfio_irq_set);
1124 
1125 	for (i = 0; i < nr_entries; i++) {
1126 		entry = &msis->entries[i];
1127 		entry->gsi = -1;
1128 		entry->eventfd = -1;
1129 		msi_set_masked(entry->virt_state, true);
1130 		msi_set_masked(entry->phys_state, true);
1131 		eventfds[i] = -1;
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 static void vfio_pci_disable_intx(struct kvm *kvm, struct vfio_device *vdev)
1138 {
1139 	struct vfio_pci_device *pdev = &vdev->pci;
1140 	int gsi = pdev->intx_gsi;
1141 	struct vfio_irq_set irq_set = {
1142 		.argsz	= sizeof(irq_set),
1143 		.flags	= VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
1144 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
1145 	};
1146 
1147 	if (pdev->intx_fd == -1)
1148 		return;
1149 
1150 	pr_debug("user requested MSI, disabling INTx %d", gsi);
1151 
1152 	ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
1153 	irq__del_irqfd(kvm, gsi, pdev->intx_fd);
1154 
1155 	close(pdev->intx_fd);
1156 	close(pdev->unmask_fd);
1157 	pdev->intx_fd = -1;
1158 }
1159 
1160 static int vfio_pci_enable_intx(struct kvm *kvm, struct vfio_device *vdev)
1161 {
1162 	int ret;
1163 	int trigger_fd, unmask_fd;
1164 	union vfio_irq_eventfd	trigger;
1165 	union vfio_irq_eventfd	unmask;
1166 	struct vfio_pci_device *pdev = &vdev->pci;
1167 	int gsi = pdev->intx_gsi;
1168 
1169 	if (pdev->intx_fd != -1)
1170 		return 0;
1171 
1172 	/*
1173 	 * PCI IRQ is level-triggered, so we use two eventfds. trigger_fd
1174 	 * signals an interrupt from host to guest, and unmask_fd signals the
1175 	 * deassertion of the line from guest to host.
1176 	 */
1177 	trigger_fd = eventfd(0, 0);
1178 	if (trigger_fd < 0) {
1179 		vfio_dev_err(vdev, "failed to create trigger eventfd");
1180 		return trigger_fd;
1181 	}
1182 
1183 	unmask_fd = eventfd(0, 0);
1184 	if (unmask_fd < 0) {
1185 		vfio_dev_err(vdev, "failed to create unmask eventfd");
1186 		close(trigger_fd);
1187 		return unmask_fd;
1188 	}
1189 
1190 	ret = irq__add_irqfd(kvm, gsi, trigger_fd, unmask_fd);
1191 	if (ret)
1192 		goto err_close;
1193 
1194 	trigger.irq = (struct vfio_irq_set) {
1195 		.argsz	= sizeof(trigger),
1196 		.flags	= VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
1197 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
1198 		.start	= 0,
1199 		.count	= 1,
1200 	};
1201 	set_vfio_irq_eventfd_payload(&trigger, trigger_fd);
1202 
1203 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &trigger);
1204 	if (ret < 0) {
1205 		vfio_dev_err(vdev, "failed to setup VFIO IRQ");
1206 		goto err_delete_line;
1207 	}
1208 
1209 	unmask.irq = (struct vfio_irq_set) {
1210 		.argsz	= sizeof(unmask),
1211 		.flags	= VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK,
1212 		.index	= VFIO_PCI_INTX_IRQ_INDEX,
1213 		.start	= 0,
1214 		.count	= 1,
1215 	};
1216 	set_vfio_irq_eventfd_payload(&unmask, unmask_fd);
1217 
1218 	ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &unmask);
1219 	if (ret < 0) {
1220 		vfio_dev_err(vdev, "failed to setup unmask IRQ");
1221 		goto err_remove_event;
1222 	}
1223 
1224 	pdev->intx_fd = trigger_fd;
1225 	pdev->unmask_fd = unmask_fd;
1226 
1227 	return 0;
1228 
1229 err_remove_event:
1230 	/* Remove trigger event */
1231 	trigger.irq.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
1232 	trigger.irq.count = 0;
1233 	ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &trigger);
1234 
1235 err_delete_line:
1236 	irq__del_irqfd(kvm, gsi, trigger_fd);
1237 
1238 err_close:
1239 	close(trigger_fd);
1240 	close(unmask_fd);
1241 	return ret;
1242 }
1243 
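/*
 * Resampler flow used above, for reference: the device asserts the line and
 * VFIO masks it and signals trigger_fd; KVM injects the interrupt into the
 * guest and, on EOI, signals the resample eventfd (unmask_fd), which makes
 * VFIO unmask the line so that it can fire again.
 */
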
1244 static int vfio_pci_init_intx(struct kvm *kvm, struct vfio_device *vdev)
1245 {
1246 	int ret;
1247 	struct vfio_pci_device *pdev = &vdev->pci;
1248 	struct vfio_irq_info irq_info = {
1249 		.argsz = sizeof(irq_info),
1250 		.index = VFIO_PCI_INTX_IRQ_INDEX,
1251 	};
1252 
1253 	vfio_pci_reserve_irq_fds(2);
1254 
1255 	ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
1256 	if (ret || irq_info.count == 0) {
1257 		vfio_dev_err(vdev, "no INTx reported by VFIO");
1258 		return -ENODEV;
1259 	}
1260 
1261 	if (!(irq_info.flags & VFIO_IRQ_INFO_EVENTFD)) {
1262 		vfio_dev_err(vdev, "interrupt not eventfd capable");
1263 		return -EINVAL;
1264 	}
1265 
1266 	if (!(irq_info.flags & VFIO_IRQ_INFO_AUTOMASKED)) {
1267 		vfio_dev_err(vdev, "INTx interrupt not AUTOMASKED");
1268 		return -EINVAL;
1269 	}
1270 
1271 	/* Guest is going to overwrite our irq_line... */
1272 	pdev->intx_gsi = pdev->hdr.irq_line - KVM_IRQ_OFFSET;
1273 
1274 	pdev->intx_fd = -1;
1275 
1276 	return 0;
1277 }
1278 
1279 static int vfio_pci_configure_dev_irqs(struct kvm *kvm, struct vfio_device *vdev)
1280 {
1281 	int ret = 0;
1282 	struct vfio_pci_device *pdev = &vdev->pci;
1283 
1284 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSIX) {
1285 		pdev->msix.info = (struct vfio_irq_info) {
1286 			.argsz = sizeof(pdev->msix.info),
1287 			.index = VFIO_PCI_MSIX_IRQ_INDEX,
1288 		};
1289 		ret = vfio_pci_init_msis(kvm, vdev, &pdev->msix);
1290 		if (ret)
1291 			return ret;
1292 	}
1293 
1294 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_MSI) {
1295 		pdev->msi.info = (struct vfio_irq_info) {
1296 			.argsz = sizeof(pdev->msi.info),
1297 			.index = VFIO_PCI_MSI_IRQ_INDEX,
1298 		};
1299 		ret = vfio_pci_init_msis(kvm, vdev, &pdev->msi);
1300 		if (ret)
1301 			return ret;
1302 	}
1303 
1304 	if (pdev->irq_modes & VFIO_PCI_IRQ_MODE_INTX) {
1305 		pci__assign_irq(&vdev->pci.hdr);
1306 
1307 		ret = vfio_pci_init_intx(kvm, vdev);
1308 		if (ret)
1309 			return ret;
1310 
1311 		ret = vfio_pci_enable_intx(kvm, vdev);
1312 	}
1313 
1314 	return ret;
1315 }
1316 
1317 int vfio_pci_setup_device(struct kvm *kvm, struct vfio_device *vdev)
1318 {
1319 	int ret;
1320 
1321 	ret = vfio_pci_configure_dev_regions(kvm, vdev);
1322 	if (ret) {
1323 		vfio_dev_err(vdev, "failed to configure regions");
1324 		return ret;
1325 	}
1326 
1327 	vdev->dev_hdr = (struct device_header) {
1328 		.bus_type	= DEVICE_BUS_PCI,
1329 		.data		= &vdev->pci.hdr,
1330 	};
1331 
1332 	ret = device__register(&vdev->dev_hdr);
1333 	if (ret) {
1334 		vfio_dev_err(vdev, "failed to register VFIO device");
1335 		return ret;
1336 	}
1337 
1338 	ret = vfio_pci_configure_dev_irqs(kvm, vdev);
1339 	if (ret) {
1340 		vfio_dev_err(vdev, "failed to configure IRQs");
1341 		return ret;
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 void vfio_pci_teardown_device(struct kvm *kvm, struct vfio_device *vdev)
1348 {
1349 	size_t i;
1350 	struct vfio_pci_device *pdev = &vdev->pci;
1351 
1352 	for (i = 0; i < vdev->info.num_regions; i++)
1353 		vfio_unmap_region(kvm, &vdev->regions[i]);
1354 
1355 	device__unregister(&vdev->dev_hdr);
1356 
1357 	free(pdev->msix.irq_set);
1358 	free(pdev->msix.entries);
1359 	free(pdev->msi.irq_set);
1360 	free(pdev->msi.entries);
1361 }
1362