Lines matching "+full:remove +full:- +full:item" (full-text search over QEMU's util/vfio-helpers.c)

4  * Copyright 2016 - 2018 Red Hat, Inc.
10 * See the COPYING file in the top-level directory.
18 #include "exec/cpu-common.h"
21 #include "qemu/error-report.h"
22 #include "standard-headers/linux/pci_regs.h"
24 #include "qemu/vfio-helpers.h"
65  *  ---------------     <= 0
66  *  |xxxxxxxxxxxxx|
67  *  |-------------|     <= QEMU_VFIO_IOVA_MIN
68  *  |             |
69  *  |    Fixed    |
70  *  |             |
71  *  |-------------|     <= low_water_mark
72  *  |             |
73  *  |    Free     |
74  *  |             |
75  *  |-------------|     <= high_water_mark
76  *  |             |
77  *  |    Temp     |
78  *  |             |
79  *  |-------------|     <= QEMU_VFIO_IOVA_MAX
80  *  |xxxxxxxxxxxxx|
81  *  |xxxxxxxxxxxxx|
82  *  ---------------
84 * - Addresses lower than QEMU_VFIO_IOVA_MIN are reserved as invalid;
86 * - Fixed mappings of HVAs are assigned "low" IOVAs in the range of
87 *   [QEMU_VFIO_IOVA_MIN, low_water_mark). Once allocated they will not be
88 *   reclaimed - low_water_mark never shrinks;
90 * - IOVAs in range [low_water_mark, high_water_mark) are free;
92 * - IOVAs in range [high_water_mark, QEMU_VFIO_IOVA_MAX) are volatile
93 *   mappings. At each qemu_vfio_dma_reset_temporary() call, the whole area
94 *   is recycled.
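
For reference, here is a minimal sketch (not part of the file) of the two-watermark policy the comment above describes. The names IOVA_MIN, IOVA_MAX, iova_alloc_fixed() and iova_alloc_temp() are invented for illustration; the real helpers are qemu_vfio_find_fixed_iova() and qemu_vfio_find_temp_iova(), which additionally skip IOVA ranges the kernel reports as unusable.

#include <stdbool.h>
#include <stdint.h>

#define IOVA_MIN  0x10000ULL
#define IOVA_MAX  (1ULL << 39)

static uint64_t low_water_mark  = IOVA_MIN;   /* only ever grows upward */
static uint64_t high_water_mark = IOVA_MAX;   /* grows downward, reset in bulk */

static bool iova_alloc_fixed(uint64_t size, uint64_t *iova)
{
    if (high_water_mark - low_water_mark < size) {
        return false;                 /* the two marks would collide */
    }
    *iova = low_water_mark;
    low_water_mark += size;           /* permanent: never reclaimed */
    return true;
}

static bool iova_alloc_temp(uint64_t size, uint64_t *iova)
{
    if (high_water_mark - low_water_mark < size) {
        return false;
    }
    high_water_mark -= size;          /* reclaimed wholesale by a later reset */
    *iova = high_water_mark;
    return true;
}
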
119 gerr->message); in sysfs_find_group_file()
137 assert(index >= 0 && index < ARRAY_SIZE(s->bar_region_info)); in assert_bar_index_valid()
144 s->bar_region_info[index] = (struct vfio_region_info) { in qemu_vfio_pci_init_bar()
148 if (ioctl(s->device, VFIO_DEVICE_GET_REGION_INFO, &s->bar_region_info[index])) { in qemu_vfio_pci_init_bar()
150 return -errno; in qemu_vfio_pci_init_bar()
153 trace_qemu_vfio_region_info(barname, s->bar_region_info[index].offset, in qemu_vfio_pci_init_bar()
154 s->bar_region_info[index].size, in qemu_vfio_pci_init_bar()
155 s->bar_region_info[index].cap_offset); in qemu_vfio_pci_init_bar()
170 p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset), in qemu_vfio_pci_map_bar()
172 s->device, s->bar_region_info[index].offset + offset); in qemu_vfio_pci_map_bar()
173 trace_qemu_vfio_pci_map_bar(index, s->bar_region_info[index].offset, in qemu_vfio_pci_map_bar()
189 munmap(bar, MIN(size, s->bar_region_info[index].size - offset)); in qemu_vfio_pci_unmap_bar()
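
For context, a minimal sketch (not from the file) of the pattern qemu_vfio_pci_init_bar() and qemu_vfio_pci_map_bar() follow: query the region with VFIO_DEVICE_GET_REGION_INFO, then mmap() the reported offset on the device fd. map_pci_bar() is a made-up name and error handling is trimmed.

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

static void *map_pci_bar(int device_fd, int bar_index, size_t len, int prot)
{
    struct vfio_region_info ri = {
        .argsz = sizeof(ri),
        .index = VFIO_PCI_BAR0_REGION_INDEX + bar_index,
    };

    if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &ri)) {
        return NULL;
    }
    if (len > ri.size) {
        len = ri.size;                /* never map past the end of the BAR */
    }
    /* ri.offset is where this BAR lives within the device fd */
    void *p = mmap(NULL, len, prot, MAP_SHARED, device_fd, ri.offset);
    return p == MAP_FAILED ? NULL : p;
}
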
205 if (ioctl(s->device, VFIO_DEVICE_GET_IRQ_INFO, &irq_info)) { in qemu_vfio_pci_init_irq()
207 return -errno; in qemu_vfio_pci_init_irq()
211 return -EINVAL; in qemu_vfio_pci_init_irq()
226 *(int *)&irq_set->data = event_notifier_get_fd(e); in qemu_vfio_pci_init_irq()
227 r = ioctl(s->device, VFIO_DEVICE_SET_IRQS, irq_set); in qemu_vfio_pci_init_irq()
231 return -errno; in qemu_vfio_pci_init_irq()
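
A minimal sketch (not from the file) of what qemu_vfio_pci_init_irq() does above: attach an eventfd to one interrupt vector via VFIO_DEVICE_SET_IRQS. The QEMU code uses an EventNotifier; here a raw eventfd descriptor is assumed, and attach_irq_eventfd() is an invented name.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int attach_irq_eventfd(int device_fd, int irq_index, int event_fd)
{
    struct vfio_irq_set *irq_set;
    size_t argsz = sizeof(*irq_set) + sizeof(int);
    int ret;

    irq_set = calloc(1, argsz);
    if (!irq_set) {
        return -1;
    }
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = irq_index;       /* e.g. VFIO_PCI_MSIX_IRQ_INDEX */
    irq_set->start = 0;
    irq_set->count = 1;
    memcpy(irq_set->data, &event_fd, sizeof(int));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    free(irq_set);
    return ret;
}
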
242 s->config_region_info.offset, in qemu_vfio_pci_read_config()
243 s->config_region_info.size); in qemu_vfio_pci_read_config()
244 assert(QEMU_IS_ALIGNED(s->config_region_info.offset + ofs, size)); in qemu_vfio_pci_read_config()
246 pread(s->device, buf, size, s->config_region_info.offset + ofs) in qemu_vfio_pci_read_config()
248 return ret == size ? 0 : -errno; in qemu_vfio_pci_read_config()
256 s->config_region_info.offset, in qemu_vfio_pci_write_config()
257 s->config_region_info.size); in qemu_vfio_pci_write_config()
258 assert(QEMU_IS_ALIGNED(s->config_region_info.offset + ofs, size)); in qemu_vfio_pci_write_config()
260 pwrite(s->device, buf, size, s->config_region_info.offset + ofs) in qemu_vfio_pci_write_config()
262 return ret == size ? 0 : -errno; in qemu_vfio_pci_write_config()
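
PCI config space is just another VFIO region, so the two helpers above use pread()/pwrite() at the config region's file offset. A minimal sketch (not from the file), with read_vendor_id() as an invented example function:

#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static int read_vendor_id(int device_fd, uint64_t config_region_offset,
                          uint16_t *vendor)
{
    /* PCI_VENDOR_ID is byte offset 0 of config space */
    ssize_t r = pread(device_fd, vendor, sizeof(*vendor),
                      config_region_offset + PCI_VENDOR_ID);
    return r == sizeof(*vendor) ? 0 : -1;
}
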
268 struct vfio_info_cap_header *cap = (void *)buf + info->cap_offset; in collect_usable_iova_ranges()
272 while (cap->id != VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) { in collect_usable_iova_ranges()
273 if (!cap->next) { in collect_usable_iova_ranges()
276 cap = buf + cap->next; in collect_usable_iova_ranges()
281 s->nb_iova_ranges = cap_iova_range->nr_iovas; in collect_usable_iova_ranges()
282 if (s->nb_iova_ranges > 1) { in collect_usable_iova_ranges()
283 s->usable_iova_ranges = in collect_usable_iova_ranges()
284 g_renew(struct IOVARange, s->usable_iova_ranges, in collect_usable_iova_ranges()
285 s->nb_iova_ranges); in collect_usable_iova_ranges()
288 for (i = 0; i < s->nb_iova_ranges; i++) { in collect_usable_iova_ranges()
289 s->usable_iova_ranges[i].start = cap_iova_range->iova_ranges[i].start; in collect_usable_iova_ranges()
290 s->usable_iova_ranges[i].end = cap_iova_range->iova_ranges[i].end; in collect_usable_iova_ranges()
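
The loop in collect_usable_iova_ranges() above is an instance of the generic VFIO capability-chain walk: cap_offset and each header's next field are byte offsets from the start of the info buffer. A minimal sketch (not from the file); find_iommu_info_cap() is an invented name, and passing VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE would locate the capability this code parses.

#include <stdint.h>
#include <linux/vfio.h>

static struct vfio_info_cap_header *
find_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    uint32_t off;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS) || !info->cap_offset) {
        return NULL;
    }
    /* cap_offset and each 'next' are byte offsets from the start of info */
    for (off = info->cap_offset; off; off = hdr->next) {
        hdr = (struct vfio_info_cap_header *)((char *)info + off);
        if (hdr->id == id) {
            return hdr;
        }
    }
    return NULL;
}
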
306 s->usable_iova_ranges = NULL; in qemu_vfio_init_pci()
309 s->container = open("/dev/vfio/vfio", O_RDWR); in qemu_vfio_init_pci()
311 if (s->container == -1) { in qemu_vfio_init_pci()
313 return -errno; in qemu_vfio_init_pci()
315 if (ioctl(s->container, VFIO_GET_API_VERSION) != VFIO_API_VERSION) { in qemu_vfio_init_pci()
317 ret = -EINVAL; in qemu_vfio_init_pci()
321 if (!ioctl(s->container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) { in qemu_vfio_init_pci()
323 ret = -EINVAL; in qemu_vfio_init_pci()
330 ret = -EINVAL; in qemu_vfio_init_pci()
334 s->group = open(group_file, O_RDWR); in qemu_vfio_init_pci()
335 if (s->group == -1) { in qemu_vfio_init_pci()
339 ret = -errno; in qemu_vfio_init_pci()
345 if (ioctl(s->group, VFIO_GROUP_GET_STATUS, &group_status)) { in qemu_vfio_init_pci()
347 ret = -errno; in qemu_vfio_init_pci()
353 ret = -EINVAL; in qemu_vfio_init_pci()
358 if (ioctl(s->group, VFIO_GROUP_SET_CONTAINER, &s->container)) { in qemu_vfio_init_pci()
360 ret = -errno; in qemu_vfio_init_pci()
365 if (ioctl(s->container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)) { in qemu_vfio_init_pci()
367 ret = -errno; in qemu_vfio_init_pci()
372 iommu_info->argsz = iommu_info_size; in qemu_vfio_init_pci()
375 if (ioctl(s->container, VFIO_IOMMU_GET_INFO, iommu_info)) { in qemu_vfio_init_pci()
377 ret = -errno; in qemu_vfio_init_pci()
383 * the legacy [QEMU_VFIO_IOVA_MIN, QEMU_VFIO_IOVA_MAX - 1] region in qemu_vfio_init_pci()
385 s->nb_iova_ranges = 1; in qemu_vfio_init_pci()
386 s->usable_iova_ranges = g_new0(struct IOVARange, 1); in qemu_vfio_init_pci()
387 s->usable_iova_ranges[0].start = QEMU_VFIO_IOVA_MIN; in qemu_vfio_init_pci()
388 s->usable_iova_ranges[0].end = QEMU_VFIO_IOVA_MAX - 1; in qemu_vfio_init_pci()
390 if (iommu_info->argsz > iommu_info_size) { in qemu_vfio_init_pci()
391 iommu_info_size = iommu_info->argsz; in qemu_vfio_init_pci()
393 if (ioctl(s->container, VFIO_IOMMU_GET_INFO, iommu_info)) { in qemu_vfio_init_pci()
394 ret = -errno; in qemu_vfio_init_pci()
400 s->device = ioctl(s->group, VFIO_GROUP_GET_DEVICE_FD, device); in qemu_vfio_init_pci()
402 if (s->device < 0) { in qemu_vfio_init_pci()
404 ret = -errno; in qemu_vfio_init_pci()
409 if (ioctl(s->device, VFIO_DEVICE_GET_INFO, &device_info)) { in qemu_vfio_init_pci()
411 ret = -errno; in qemu_vfio_init_pci()
417 ret = -EINVAL; in qemu_vfio_init_pci()
421 s->config_region_info = (struct vfio_region_info) { in qemu_vfio_init_pci()
425 if (ioctl(s->device, VFIO_DEVICE_GET_REGION_INFO, &s->config_region_info)) { in qemu_vfio_init_pci()
427 ret = -errno; in qemu_vfio_init_pci()
430 trace_qemu_vfio_region_info("config", s->config_region_info.offset, in qemu_vfio_init_pci()
431 s->config_region_info.size, in qemu_vfio_init_pci()
432 s->config_region_info.cap_offset); in qemu_vfio_init_pci()
434 for (i = 0; i < ARRAY_SIZE(s->bar_region_info); i++) { in qemu_vfio_init_pci()
454 g_free(s->usable_iova_ranges); in qemu_vfio_init_pci()
455 s->usable_iova_ranges = NULL; in qemu_vfio_init_pci()
456 s->nb_iova_ranges = 0; in qemu_vfio_init_pci()
458 close(s->group); in qemu_vfio_init_pci()
460 close(s->container); in qemu_vfio_init_pci()
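
The matched lines above trace the canonical VFIO type1 bring-up sequence that qemu_vfio_init_pci() follows. A consolidated sketch (not from the file): vfio_open_device(), group_path and bdf are placeholders (the real code derives the group path from sysfs and takes the "0000:xx:yy.z" device name from the caller), and error cleanup of the opened fds is omitted for brevity.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_open_device(const char *group_path, const char *bdf,
                            int *container, int *group, int *device)
{
    struct vfio_group_status status = { .argsz = sizeof(status) };

    *container = open("/dev/vfio/vfio", O_RDWR);
    if (*container < 0 ||
        ioctl(*container, VFIO_GET_API_VERSION) != VFIO_API_VERSION ||
        !ioctl(*container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        return -1;
    }

    *group = open(group_path, O_RDWR);          /* e.g. "/dev/vfio/26" */
    if (*group < 0 ||
        ioctl(*group, VFIO_GROUP_GET_STATUS, &status) ||
        !(status.flags & VFIO_GROUP_FLAGS_VIABLE) ||
        ioctl(*group, VFIO_GROUP_SET_CONTAINER, container) ||
        ioctl(*container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)) {
        return -1;
    }

    /* bdf is the device name within the group, e.g. "0000:01:00.0" */
    *device = ioctl(*group, VFIO_GROUP_GET_DEVICE_FD, bdf);
    return *device < 0 ? -1 : 0;
}
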
492 qemu_mutex_init(&s->lock); in qemu_vfio_open_common()
493 s->ram_notifier.ram_block_added = qemu_vfio_ram_block_added; in qemu_vfio_open_common()
494 s->ram_notifier.ram_block_removed = qemu_vfio_ram_block_removed; in qemu_vfio_open_common()
495 s->low_water_mark = QEMU_VFIO_IOVA_MIN; in qemu_vfio_open_common()
496 s->high_water_mark = QEMU_VFIO_IOVA_MAX; in qemu_vfio_open_common()
497 ram_block_notifier_add(&s->ram_notifier); in qemu_vfio_open_common()
514 error_setg_errno(errp, -r, "Cannot set discarding of RAM broken"); in qemu_vfio_open_pci()
531 for (int i = 0; i < s->nr_mappings; ++i) { in qemu_vfio_dump_mappings()
532 trace_qemu_vfio_dump_mapping(s->mappings[i].host, in qemu_vfio_dump_mappings()
533 s->mappings[i].iova, in qemu_vfio_dump_mappings()
534 s->mappings[i].size); in qemu_vfio_dump_mappings()
542 * is smaller than @host, or -1 if no entry is.
547 IOVAMapping *p = s->mappings; in qemu_vfio_find_mapping()
548 IOVAMapping *q = p ? p + s->nr_mappings - 1 : NULL; in qemu_vfio_find_mapping()
552 *index = -1; in qemu_vfio_find_mapping()
556 mid = p + (q - p) / 2; in qemu_vfio_find_mapping()
560 if (mid->host > host) { in qemu_vfio_find_mapping()
562 } else if (mid->host < host) { in qemu_vfio_find_mapping()
568 if (mid->host > host) { in qemu_vfio_find_mapping()
569 mid--; in qemu_vfio_find_mapping()
570 } else if (mid < &s->mappings[s->nr_mappings - 1] in qemu_vfio_find_mapping()
571 && (mid + 1)->host <= host) { in qemu_vfio_find_mapping()
574 *index = mid - &s->mappings[0]; in qemu_vfio_find_mapping()
575 if (mid >= &s->mappings[0] && in qemu_vfio_find_mapping()
576 mid->host <= host && mid->host + mid->size > host) { in qemu_vfio_find_mapping()
577 assert(mid < &s->mappings[s->nr_mappings]); in qemu_vfio_find_mapping()
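
The lookup above is a binary search over an array kept sorted by host address. A simplified sketch (not from the file): unlike the real qemu_vfio_find_mapping(), which reports via *index the last mapping below @host (or -1), this variant reports the insertion point for a new entry; both return the containing mapping when one exists.

#include <stddef.h>
#include <stdint.h>

typedef struct {
    void    *host;
    size_t   size;
    uint64_t iova;
} Mapping;

/* Array is kept sorted by host address; entries never overlap. */
static Mapping *find_mapping(Mapping *m, int n, void *host, int *index)
{
    int lo = 0, hi = n;                     /* search interval [lo, hi) */

    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if ((char *)m[mid].host + m[mid].size <= (char *)host) {
            lo = mid + 1;                   /* m[mid] lies entirely below host */
        } else {
            hi = mid;
        }
    }
    *index = lo;                            /* where a new entry would go */
    if (lo < n && (char *)m[lo].host <= (char *)host) {
        return &m[lo];                      /* host falls inside m[lo] */
    }
    return NULL;
}
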
597 assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size())); in qemu_vfio_add_mapping()
598 assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size())); in qemu_vfio_add_mapping()
602 s->nr_mappings++; in qemu_vfio_add_mapping()
603 s->mappings = g_renew(IOVAMapping, s->mappings, s->nr_mappings); in qemu_vfio_add_mapping()
604 insert = &s->mappings[index]; in qemu_vfio_add_mapping()
605 shift = s->nr_mappings - index - 1; in qemu_vfio_add_mapping()
607 memmove(insert + 1, insert, shift * sizeof(s->mappings[0])); in qemu_vfio_add_mapping()
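
qemu_vfio_add_mapping() above grows the sorted array with g_renew() and opens a slot with memmove(). A minimal sketch (not from the file) of that insertion step; insert_mapping() and the simplified Mapping type are invented for illustration.

#include <string.h>
#include <glib.h>

typedef struct {
    void    *host;
    size_t   size;
    guint64  iova;
} Mapping;

static Mapping *insert_mapping(Mapping *array, int *nr, int index,
                               const Mapping *m)
{
    /* g_renew() is a typed realloc(); grow the array by one element */
    array = g_renew(Mapping, array, *nr + 1);
    /* shift everything at and after 'index' up by one slot */
    memmove(&array[index + 1], &array[index],
            (*nr - index) * sizeof(array[0]));
    array[index] = *m;
    (*nr)++;
    return array;
}
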
626 if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) { in qemu_vfio_do_mapping()
628 return -errno; in qemu_vfio_do_mapping()
634 * Undo the DMA mapping from @s with VFIO, and remove from mapping list.
643 .iova = mapping->iova, in qemu_vfio_undo_mapping()
644 .size = mapping->size, in qemu_vfio_undo_mapping()
647 index = mapping - s->mappings; in qemu_vfio_undo_mapping()
648 assert(mapping->size > 0); in qemu_vfio_undo_mapping()
649 assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size())); in qemu_vfio_undo_mapping()
650 assert(index >= 0 && index < s->nr_mappings); in qemu_vfio_undo_mapping()
651 if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) { in qemu_vfio_undo_mapping()
654 memmove(mapping, &s->mappings[index + 1], in qemu_vfio_undo_mapping()
655 sizeof(s->mappings[0]) * (s->nr_mappings - index - 1)); in qemu_vfio_undo_mapping()
656 s->nr_mappings--; in qemu_vfio_undo_mapping()
657 s->mappings = g_renew(IOVAMapping, s->mappings, s->nr_mappings); in qemu_vfio_undo_mapping()
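
The two functions above wrap the pair of type1 container ioctls that establish and tear down an IOMMU mapping. A minimal sketch (not from the file) of those calls in isolation; dma_map() and dma_unmap() are invented names.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int dma_map(int container, void *host, uint64_t iova, uint64_t size)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
        .vaddr = (uintptr_t)host,   /* host virtual address to pin and map */
        .iova  = iova,
        .size  = size,
    };
    return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}

static int dma_unmap(int container, uint64_t iova, uint64_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .iova  = iova,
        .size  = size,
    };
    return ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
}
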
665 for (i = 0; i < s->nr_mappings - 1; ++i) { in qemu_vfio_verify_mappings()
666 if (!(s->mappings[i].host < s->mappings[i + 1].host)) { in qemu_vfio_verify_mappings()
667 error_report("item %d not sorted!", i); in qemu_vfio_verify_mappings()
671 if (!(s->mappings[i].host + s->mappings[i].size <= in qemu_vfio_verify_mappings()
672 s->mappings[i + 1].host)) { in qemu_vfio_verify_mappings()
673 error_report("item %d overlap with next!", i); in qemu_vfio_verify_mappings()
687 for (i = 0; i < s->nb_iova_ranges; i++) { in qemu_vfio_find_fixed_iova()
688 if (s->usable_iova_ranges[i].end < s->low_water_mark) { in qemu_vfio_find_fixed_iova()
691 s->low_water_mark = in qemu_vfio_find_fixed_iova()
692 MAX(s->low_water_mark, s->usable_iova_ranges[i].start); in qemu_vfio_find_fixed_iova()
694 if (s->usable_iova_ranges[i].end - s->low_water_mark + 1 >= size || in qemu_vfio_find_fixed_iova()
695 s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) { in qemu_vfio_find_fixed_iova()
696 *iova = s->low_water_mark; in qemu_vfio_find_fixed_iova()
697 s->low_water_mark += size; in qemu_vfio_find_fixed_iova()
711 for (i = s->nb_iova_ranges - 1; i >= 0; i--) { in qemu_vfio_find_temp_iova()
712 if (s->usable_iova_ranges[i].start > s->high_water_mark) { in qemu_vfio_find_temp_iova()
715 s->high_water_mark = in qemu_vfio_find_temp_iova()
716 MIN(s->high_water_mark, s->usable_iova_ranges[i].end + 1); in qemu_vfio_find_temp_iova()
718 if (s->high_water_mark - s->usable_iova_ranges[i].start + 1 >= size || in qemu_vfio_find_temp_iova()
719 s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) { in qemu_vfio_find_temp_iova()
720 *iova = s->high_water_mark - size; in qemu_vfio_find_temp_iova()
721 s->high_water_mark = *iova; in qemu_vfio_find_temp_iova()
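
Both allocators above walk s->usable_iova_ranges and clamp the relevant water mark into the nearest usable range. A simplified sketch (not from the file) of the upward, fixed-IOVA case; the real qemu_vfio_find_fixed_iova() also special-cases a range that ends at the very top of the IOVA space (where end - mark + 1 wraps to 0), and qemu_vfio_find_temp_iova() mirrors the same walk downward from the high water mark.

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t start, end; } Range;    /* inclusive bounds */

static bool alloc_fixed_iova(Range *usable, int nr, uint64_t *low_water_mark,
                             uint64_t size, uint64_t *iova)
{
    for (int i = 0; i < nr; i++) {
        if (usable[i].end < *low_water_mark) {
            continue;                          /* range already fully consumed */
        }
        if (*low_water_mark < usable[i].start) {
            *low_water_mark = usable[i].start; /* hop over a reserved hole */
        }
        if (usable[i].end - *low_water_mark + 1 >= size) {
            *iova = *low_water_mark;
            *low_water_mark += size;
            return true;
        }
    }
    return false;
}
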
738 if (s->high_water_mark - s->low_water_mark + 1 < size) { in qemu_vfio_water_mark_reached()
760 QEMU_LOCK_GUARD(&s->lock); in qemu_vfio_dma_map()
763 iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host); in qemu_vfio_dma_map()
768 return -ENOMEM; in qemu_vfio_dma_map()
772 return -ENOMEM; in qemu_vfio_dma_map()
785 return -ENOMEM; in qemu_vfio_dma_map()
806 .iova = s->high_water_mark, in qemu_vfio_dma_reset_temporary()
807 .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark, in qemu_vfio_dma_reset_temporary()
810 QEMU_LOCK_GUARD(&s->lock); in qemu_vfio_dma_reset_temporary()
811 if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) { in qemu_vfio_dma_reset_temporary()
813 return -errno; in qemu_vfio_dma_reset_temporary()
815 s->high_water_mark = QEMU_VFIO_IOVA_MAX; in qemu_vfio_dma_reset_temporary()
831 QEMU_LOCK_GUARD(&s->lock); in qemu_vfio_dma_unmap()
841 ioctl(s->device, VFIO_DEVICE_RESET); in qemu_vfio_reset()
853 ram_block_notifier_remove(&s->ram_notifier); in qemu_vfio_close()
855 for (i = 0; i < s->nr_mappings; ++i) { in qemu_vfio_close()
856 qemu_vfio_undo_mapping(s, &s->mappings[i], NULL); in qemu_vfio_close()
859 g_free(s->usable_iova_ranges); in qemu_vfio_close()
860 s->nb_iova_ranges = 0; in qemu_vfio_close()
862 close(s->device); in qemu_vfio_close()
863 close(s->group); in qemu_vfio_close()
864 close(s->container); in qemu_vfio_close()