Lines Matching +full:iommu-map-mask

1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
19 #include <linux/iommu.h>
20 #include <linux/iommu-dma.h>
36 #include "dma-iommu.h"
37 #include "iommu-pages.h"
74 /* Options for dma-iommu use */
94 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
112 /* Per-CPU flush queue structure */
121 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
125 assert_spin_locked(&fq->lock); in fq_full()
126 return (((fq->tail + 1) & fq->mod_mask) == fq->head); in fq_full()
131 unsigned int idx = fq->tail; in fq_ring_add()
133 assert_spin_locked(&fq->lock); in fq_ring_add()
135 fq->tail = (idx + 1) & fq->mod_mask; in fq_ring_add()
142 u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt); in fq_ring_free_locked()
145 assert_spin_locked(&fq->lock); in fq_ring_free_locked()
149 if (fq->entries[idx].counter >= counter) in fq_ring_free_locked()
152 iommu_put_pages_list(&fq->entries[idx].freelist); in fq_ring_free_locked()
153 free_iova_fast(&cookie->iovad, in fq_ring_free_locked()
154 fq->entries[idx].iova_pfn, in fq_ring_free_locked()
155 fq->entries[idx].pages); in fq_ring_free_locked()
157 fq->head = (fq->head + 1) & fq->mod_mask; in fq_ring_free_locked()
165 spin_lock_irqsave(&fq->lock, flags); in fq_ring_free()
167 spin_unlock_irqrestore(&fq->lock, flags); in fq_ring_free()
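The flush-queue ring above relies on the queue size being a power of two: head and tail wrap with a single AND against mod_mask, and the ring counts as full when advancing the tail would collide with the head. A minimal userspace model of that index arithmetic follows (toy_fq and FQ_SIZE are illustrative names, not the kernel structures):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FQ_SIZE 8                       /* must be a power of two */
#define FQ_MOD_MASK (FQ_SIZE - 1)

struct toy_fq {
        unsigned int head, tail;        /* indices into a FQ_SIZE-entry array */
};

static bool toy_fq_full(const struct toy_fq *fq)
{
        return ((fq->tail + 1) & FQ_MOD_MASK) == fq->head;
}

static unsigned int toy_fq_add(struct toy_fq *fq)
{
        unsigned int idx = fq->tail;

        assert(!toy_fq_full(fq));
        fq->tail = (idx + 1) & FQ_MOD_MASK;     /* cheap wrap-around */
        return idx;
}

static void toy_fq_pop(struct toy_fq *fq)
{
        fq->head = (fq->head + 1) & FQ_MOD_MASK;
}

int main(void)
{
        struct toy_fq fq = { 0, 0 };

        while (!toy_fq_full(&fq))
                toy_fq_add(&fq);
        printf("full after %u entries (one slot stays empty)\n", FQ_SIZE - 1);
        toy_fq_pop(&fq);
        printf("full now? %d\n", toy_fq_full(&fq));
        return 0;
}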
172 atomic64_inc(&cookie->fq_flush_start_cnt); in fq_flush_iotlb()
173 cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain); in fq_flush_iotlb()
174 atomic64_inc(&cookie->fq_flush_finish_cnt); in fq_flush_iotlb()
182 atomic_set(&cookie->fq_timer_on, 0); in fq_flush_timeout()
185 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) { in fq_flush_timeout()
186 fq_ring_free(cookie, cookie->single_fq); in fq_flush_timeout()
189 fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu)); in fq_flush_timeout()
202 * Order against the IOMMU driver's pagetable update from unmapping in queue_iova()
210 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in queue_iova()
211 fq = cookie->single_fq; in queue_iova()
213 fq = raw_cpu_ptr(cookie->percpu_fq); in queue_iova()
215 spin_lock_irqsave(&fq->lock, flags); in queue_iova()
231 fq->entries[idx].iova_pfn = pfn; in queue_iova()
232 fq->entries[idx].pages = pages; in queue_iova()
233 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); in queue_iova()
234 list_splice(freelist, &fq->entries[idx].freelist); in queue_iova()
236 spin_unlock_irqrestore(&fq->lock, flags); in queue_iova()
239 if (!atomic_read(&cookie->fq_timer_on) && in queue_iova()
240 !atomic_xchg(&cookie->fq_timer_on, 1)) in queue_iova()
241 mod_timer(&cookie->fq_timer, in queue_iova()
242 jiffies + msecs_to_jiffies(cookie->options.fq_timeout)); in queue_iova()
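Arming the flush timer is done with a cheap read followed by a single atomic exchange, so that of all the CPUs queueing IOVAs only the first one to flip fq_timer_on actually calls mod_timer(). A small standalone sketch of that handshake using C11 atomics (maybe_arm_timer and timer_fired are made-up names for illustration only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int timer_on;             /* models cookie->fq_timer_on */

/* Returns true if this caller is the one that must arm the timer. */
static bool maybe_arm_timer(void)
{
        /* Cheap read first, then one atomic test-and-set. */
        if (!atomic_load(&timer_on) && !atomic_exchange(&timer_on, 1))
                return true;            /* we flipped 0 -> 1: arm the timer */
        return false;                   /* someone else already armed it */
}

static void timer_fired(void)
{
        atomic_store(&timer_on, 0);     /* timeout handler re-enables arming */
}

int main(void)
{
        printf("first caller arms: %d\n", maybe_arm_timer());
        printf("second caller arms: %d\n", maybe_arm_timer());
        timer_fired();
        printf("after timeout, next caller arms: %d\n", maybe_arm_timer());
        return 0;
}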
250 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_single()
263 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_percpu()
271 if (!cookie->fq_domain) in iommu_dma_free_fq()
274 timer_delete_sync(&cookie->fq_timer); in iommu_dma_free_fq()
275 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_free_fq()
276 iommu_dma_free_fq_single(cookie->single_fq); in iommu_dma_free_fq()
278 iommu_dma_free_fq_percpu(cookie->percpu_fq); in iommu_dma_free_fq()
285 fq->head = 0; in iommu_dma_init_one_fq()
286 fq->tail = 0; in iommu_dma_init_one_fq()
287 fq->mod_mask = fq_size - 1; in iommu_dma_init_one_fq()
289 spin_lock_init(&fq->lock); in iommu_dma_init_one_fq()
292 INIT_LIST_HEAD(&fq->entries[i].freelist); in iommu_dma_init_one_fq()
297 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_single()
302 return -ENOMEM; in iommu_dma_init_fq_single()
304 cookie->single_fq = queue; in iommu_dma_init_fq_single()
311 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_percpu()
318 return -ENOMEM; in iommu_dma_init_fq_percpu()
322 cookie->percpu_fq = queue; in iommu_dma_init_fq_percpu()
329 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_fq()
332 if (cookie->fq_domain) in iommu_dma_init_fq()
335 atomic64_set(&cookie->fq_flush_start_cnt, 0); in iommu_dma_init_fq()
336 atomic64_set(&cookie->fq_flush_finish_cnt, 0); in iommu_dma_init_fq()
338 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_init_fq()
345 return -ENOMEM; in iommu_dma_init_fq()
348 timer_setup(&cookie->fq_timer, fq_flush_timeout, 0); in iommu_dma_init_fq()
349 atomic_set(&cookie->fq_timer_on, 0); in iommu_dma_init_fq()
355 WRITE_ONCE(cookie->fq_domain, domain); in iommu_dma_init_fq()
360 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
361 * @domain: IOMMU domain to prepare for DMA-API usage
367 if (domain->cookie_type != IOMMU_COOKIE_NONE) in iommu_get_dma_cookie()
368 return -EEXIST; in iommu_get_dma_cookie()
372 return -ENOMEM; in iommu_get_dma_cookie()
374 INIT_LIST_HEAD(&cookie->msi_page_list); in iommu_get_dma_cookie()
375 domain->cookie_type = IOMMU_COOKIE_DMA_IOVA; in iommu_get_dma_cookie()
376 domain->iova_cookie = cookie; in iommu_get_dma_cookie()
381 * iommu_get_msi_cookie - Acquire just MSI remapping resources
382 * @domain: IOMMU domain to prepare
396 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
397 return -EINVAL; in iommu_get_msi_cookie()
399 if (domain->cookie_type != IOMMU_COOKIE_NONE) in iommu_get_msi_cookie()
400 return -EEXIST; in iommu_get_msi_cookie()
404 return -ENOMEM; in iommu_get_msi_cookie()
406 cookie->msi_iova = base; in iommu_get_msi_cookie()
407 INIT_LIST_HEAD(&cookie->msi_page_list); in iommu_get_msi_cookie()
408 domain->cookie_type = IOMMU_COOKIE_DMA_MSI; in iommu_get_msi_cookie()
409 domain->msi_cookie = cookie; in iommu_get_msi_cookie()
415 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
416 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
420 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
423 if (cookie->iovad.granule) { in iommu_put_dma_cookie()
425 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
427 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) in iommu_put_dma_cookie()
433 * iommu_put_msi_cookie - Release a domain's MSI mapping resources
434 * @domain: IOMMU domain previously prepared by iommu_get_msi_cookie()
438 struct iommu_dma_msi_cookie *cookie = domain->msi_cookie; in iommu_put_msi_cookie()
441 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) in iommu_put_msi_cookie()
447 * iommu_dma_get_resv_regions - Reserved region driver helper
451 * IOMMU drivers can use this to implement their .get_resv_regions callback
452 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
459 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
462 if (dev->of_node) in iommu_dma_get_resv_regions()
470 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
474 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
475 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
480 return -ENOMEM; in cookie_init_hw_msi_region()
482 msi_page->phys = start; in cookie_init_hw_msi_region()
483 msi_page->iova = start; in cookie_init_hw_msi_region()
484 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
485 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
486 start += iovad->granule; in cookie_init_hw_msi_region()
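cookie_init_hw_msi_region() rounds the region start down to the IOVA granule and then counts granule-sized pages across the aligned span. A runnable model of that granule arithmetic, assuming a 4 KiB granule (the helpers below only mirror iova_offset()/iova_align(); they are not the kernel ones):

#include <stdint.h>
#include <stdio.h>

#define GRANULE 0x1000UL        /* assumed smallest IOMMU page size */

static uint64_t iova_offset(uint64_t iova) { return iova & (GRANULE - 1); }
static uint64_t iova_align(uint64_t size)  { return (size + GRANULE - 1) & ~(GRANULE - 1); }

int main(void)
{
        uint64_t start = 0x8000f10, end = 0x8011000;    /* example region */

        start -= iova_offset(start);                    /* round start down */
        uint64_t num_pages = iova_align(end - start) / GRANULE;

        printf("aligned start %#llx, %llu page(s)\n",
               (unsigned long long)start, (unsigned long long)num_pages);
        return 0;
}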
498 return res_a->res->start > res_b->res->start; in iommu_dma_ranges_sort()
504 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
509 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
510 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
513 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
514 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
519 list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort); in iova_reserve_pci_windows()
520 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
521 end = window->res->start - window->offset; in iova_reserve_pci_windows()
528 /* DMA ranges should be non-overlapping */ in iova_reserve_pci_windows()
529 dev_err(&dev->dev, in iova_reserve_pci_windows()
530 "Failed to reserve IOVA [%pa-%pa]\n", in iova_reserve_pci_windows()
532 return -EINVAL; in iova_reserve_pci_windows()
535 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
537 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
550 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
551 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
567 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
570 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
571 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
574 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
575 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
576 region->start + region->length); in iova_reserve_iommu_regions()
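iova_reserve_pci_windows() walks the host bridge's sorted dma_ranges and reserves every IOVA span that falls outside them, including the tail above the last range unless it already ends at the top of the address space. A simplified userspace model of that gap walk (the two example ranges are arbitrary):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };          /* inclusive bus addresses */

int main(void)
{
        struct range dma_ranges[] = {           /* assumed sorted, non-overlapping */
                { 0x00000000, 0x7fffffff },
                { 0xc0000000, 0xffffffff },
        };
        uint64_t start = 0;                     /* next candidate gap start */

        for (unsigned int i = 0; i < 2; i++) {
                if (dma_ranges[i].start > start)
                        printf("reserve [%#llx - %#llx]\n",
                               (unsigned long long)start,
                               (unsigned long long)(dma_ranges[i].start - 1));
                start = dma_ranges[i].end + 1;  /* wraps to 0 if end was ~0 */
        }
        if (start)                              /* tail gap up to the top */
                printf("reserve [%#llx - %#llx]\n",
                       (unsigned long long)start, ~0ULL);
        return 0;
}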
587 return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; in dev_is_untrusted()
611 * If kmalloc() buffers are not DMA-safe for this device and in dev_use_sg_swiotlb()
617 if (!dma_kmalloc_size_aligned(s->length)) in dev_use_sg_swiotlb()
625 * iommu_dma_init_options - Initialize dma-iommu options
629 * This allows tuning dma-iommu behaviour according to device properties
635 if (dev->iommu->shadow_on_flush) { in iommu_dma_init_options()
636 options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE; in iommu_dma_init_options()
637 options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT; in iommu_dma_init_options()
638 options->fq_size = IOVA_SINGLE_FQ_SIZE; in iommu_dma_init_options()
640 options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE; in iommu_dma_init_options()
641 options->fq_size = IOVA_DEFAULT_FQ_SIZE; in iommu_dma_init_options()
642 options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT; in iommu_dma_init_options()
647 * iommu_dma_init_domain - Initialise a DMA mapping domain
648 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
657 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
658 const struct bus_dma_region *map = dev->dma_range_map; in iommu_dma_init_domain() local
663 if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA) in iommu_dma_init_domain()
664 return -EINVAL; in iommu_dma_init_domain()
666 iovad = &cookie->iovad; in iommu_dma_init_domain()
669 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
673 if (map) { in iommu_dma_init_domain()
674 if (dma_range_map_min(map) > domain->geometry.aperture_end || in iommu_dma_init_domain()
675 dma_range_map_max(map) < domain->geometry.aperture_start) { in iommu_dma_init_domain()
676 pr_warn("specified DMA range outside IOMMU capability\n"); in iommu_dma_init_domain()
677 return -EFAULT; in iommu_dma_init_domain()
682 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
684 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
685 if (iovad->start_pfn) { in iommu_dma_init_domain()
686 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
687 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
689 return -EFAULT; in iommu_dma_init_domain()
700 iommu_dma_init_options(&cookie->options, dev); in iommu_dma_init_domain()
703 if (domain->type == IOMMU_DOMAIN_DMA_FQ && in iommu_dma_init_domain()
705 domain->type = IOMMU_DOMAIN_DMA; in iommu_dma_init_domain()
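iommu_dma_init_domain() derives the IOVA granule from the lowest set bit of the domain's pgsize_bitmap and clamps the starting PFN so that IOVA 0 is never allocated. A rough standalone approximation of that setup (the pgsize_bitmap value of 4K | 2M | 1G is an example, not a requirement):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned long pgsize_bitmap = 0x40201000UL;     /* 4K | 2M | 1G (example) */
        uint64_t aperture_start = 0;

        unsigned int order = __builtin_ctzl(pgsize_bitmap);    /* __ffs() counterpart */
        unsigned long granule = 1UL << order;
        uint64_t base_pfn = aperture_start >> order;

        if (base_pfn < 1)               /* never hand out IOVA 0 */
                base_pfn = 1;

        printf("granule %#lx, base_pfn %llu\n", granule,
               (unsigned long long)base_pfn);
        return 0;
}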
711 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
714 * @coherent: Is the DMA master cache-coherent?
717 * Return: corresponding IOMMU API page protection flags
742 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
743 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
746 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) { in iommu_dma_alloc_iova()
747 domain->msi_cookie->msi_iova += size; in iommu_dma_alloc_iova()
748 return domain->msi_cookie->msi_iova - size; in iommu_dma_alloc_iova()
754 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
756 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
757 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
760 * Try to use all the 32-bit PCI addresses first. The original SAC vs. in iommu_dma_alloc_iova()
763 * venture into the 64-bit space until necessary. in iommu_dma_alloc_iova()
767 * some inherent bug in handling >32-bit addresses, or not all the in iommu_dma_alloc_iova()
768 * expected address bits are wired up between the device and the IOMMU. in iommu_dma_alloc_iova()
770 if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { in iommu_dma_alloc_iova()
776 dev->iommu->pci_32bit_workaround = false; in iommu_dma_alloc_iova()
777 dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); in iommu_dma_alloc_iova()
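The allocator above first tries to stay below the 32-bit boundary while the workaround flag is set, and only falls back to the device's full DMA limit once that space is exhausted, clearing the flag so later allocations skip the first attempt. A toy model of that retry policy (fake_alloc_iova stands in for the real allocator and simply pretends the 32-bit space is full):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static bool pci_32bit_workaround = true;   /* models dev->iommu->pci_32bit_workaround */

/* Stand-in allocator: pretend the 32-bit space is already exhausted. */
static uint64_t fake_alloc_iova(uint64_t limit)
{
        return limit <= DMA_BIT_MASK(32) ? 0 : 0x100000000ULL;
}

/* Toy version of the "32-bit first, then full limit" policy above. */
static uint64_t alloc_iova_policy(uint64_t dma_limit)
{
        if (dma_limit > DMA_BIT_MASK(32) && pci_32bit_workaround) {
                uint64_t iova = fake_alloc_iova(DMA_BIT_MASK(32));
                if (iova)
                        return iova;
                /* 32-bit space exhausted: stop trying it for this device */
                pci_32bit_workaround = false;
                printf("switching to full-width DMA addresses\n");
        }
        return fake_alloc_iova(dma_limit);
}

int main(void)
{
        printf("iova = %#llx\n",
               (unsigned long long)alloc_iova_policy(DMA_BIT_MASK(64)));
        return 0;
}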
788 struct iova_domain *iovad = &domain->iova_cookie->iovad; in iommu_dma_free_iova()
791 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) in iommu_dma_free_iova()
792 domain->msi_cookie->msi_iova -= size; in iommu_dma_free_iova()
793 else if (gather && gather->queued) in iommu_dma_free_iova()
794 queue_iova(domain->iova_cookie, iova_pfn(iovad, iova), in iommu_dma_free_iova()
796 &gather->freelist); in iommu_dma_free_iova()
806 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
807 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
812 dma_addr -= iova_off; in __iommu_dma_unmap()
815 iotlb_gather.queued = READ_ONCE(cookie->fq_domain); in __iommu_dma_unmap()
829 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
830 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
849 if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) { in __iommu_dma_map()
858 while (count--) in __iommu_dma_free_pages()
877 /* IOMMU can map any pages, so highmem can also be used here */ in __iommu_dma_alloc_pages()
885 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
887 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
908 count -= order_size; in __iommu_dma_alloc_pages()
909 while (order_size--) in __iommu_dma_alloc_pages()
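The page allocator above carves the requested page count into the largest power-of-two chunks the caller's order mask allows, dropping to smaller orders as the remainder shrinks (and, in the kernel, whenever a large allocation fails). A simplified model of that order-splitting loop, assuming every chosen allocation succeeds and order 0 is always allowed:

#include <stdio.h>

int main(void)
{
        unsigned int count = 21;        /* pages still needed (example) */
        unsigned int order_mask = 0x15; /* orders 0, 2 and 4 allowed (example) */

        while (count) {
                /* keep only the allowed orders whose size is <= count */
                unsigned int fit = order_mask &
                                   ((2U << (31 - __builtin_clz(count))) - 1);
                unsigned int order = 31 - __builtin_clz(fit);

                printf("allocate an order-%u chunk (%u pages)\n",
                       order, 1U << order);
                count -= 1U << order;
        }
        return 0;
}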
917 * but an IOMMU which supports smaller pages might not map the whole thing.
923 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_alloc_noncontiguous()
924 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_alloc_noncontiguous()
927 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in __iommu_dma_alloc_noncontiguous()
936 min_size = alloc_sizes & -alloc_sizes; in __iommu_dma_alloc_noncontiguous()
953 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in __iommu_dma_alloc_noncontiguous()
958 * Remove the zone/policy flags from the GFP - these are applied to the in __iommu_dma_alloc_noncontiguous()
971 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) in __iommu_dma_alloc_noncontiguous()
972 arch_dma_prep_coherent(sg_page(sg), sg->length); in __iommu_dma_alloc_noncontiguous()
975 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot, in __iommu_dma_alloc_noncontiguous()
980 sgt->sgl->dma_address = iova; in __iommu_dma_alloc_noncontiguous()
981 sgt->sgl->dma_length = size; in __iommu_dma_alloc_noncontiguous()
1004 *dma_handle = sgt.sgl->dma_address; in iommu_dma_alloc_remap()
1022 * the DMA-API internal vmapping and freeing easier we stash away the page
1024 * e.g. when a vmap-variant that takes a scatterlist comes along.
1042 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs); in iommu_dma_alloc_noncontiguous()
1043 if (!sh->pages) { in iommu_dma_alloc_noncontiguous()
1047 return &sh->sgt; in iommu_dma_alloc_noncontiguous()
1055 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); in iommu_dma_free_noncontiguous()
1056 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); in iommu_dma_free_noncontiguous()
1057 sg_free_table(&sh->sgt); in iommu_dma_free_noncontiguous()
1066 return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); in iommu_dma_vmap_noncontiguous()
1074 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) in iommu_dma_mmap_noncontiguous()
1075 return -ENXIO; in iommu_dma_mmap_noncontiguous()
1076 return vm_map_pages(vma, sgt_handle(sgt)->pages, count); in iommu_dma_mmap_noncontiguous()
1118 sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1121 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1134 sg->length, dir); in iommu_dma_sync_sg_for_device()
1137 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
1148 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_page()
1149 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_page()
1159 dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); in iommu_dma_map_page()
1173 * leftover kernel data, so zero the pre- and post-padding. in iommu_dma_map_page()
1180 /* Pre-padding */ in iommu_dma_map_page()
1182 memset((void *)start, 0, virt - start); in iommu_dma_map_page()
1184 /* Post-padding */ in iommu_dma_map_page()
1187 iova_align(iovad, start) - start); in iommu_dma_map_page()
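When a transaction is bounced through swiotlb, the payload sits at an arbitrary offset inside a granule-aligned slot, so the bytes before and after it must be zeroed to avoid leaking stale kernel data. A runnable sketch of those two memset() ranges, assuming a 4 KiB granule and made-up offset/size values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GRANULE 0x1000UL        /* assumed IOVA granule */

int main(void)
{
        /* Stand-in for a granule-aligned bounce slot. */
        static unsigned char bounce[2 * GRANULE] __attribute__((aligned(0x1000)));
        size_t offset = 0x234, size = 0x1000;   /* unaligned payload (example) */
        unsigned char *virt = bounce + offset;
        size_t used = offset + size;
        size_t aligned_end = (used + GRANULE - 1) & ~(GRANULE - 1);

        /* Pre-padding: from the aligned start up to the payload. */
        memset(bounce, 0, virt - bounce);
        /* Post-padding: from the end of the payload to the aligned end. */
        memset(virt + size, 0, aligned_end - used);

        printf("zeroed %zu byte(s) before and %zu after the payload\n",
               (size_t)(virt - bounce), aligned_end - used);
        return 0;
}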
1219 * Prepare a successfully-mapped scatterlist to give back to the caller.
1238 unsigned int s_iova_len = s->length; in __finalise_sg()
1256 s->offset += s_iova_off; in __finalise_sg()
1257 s->length = s_length; in __finalise_sg()
1261 * - there is a valid output segment to append to in __finalise_sg()
1262 * - and this segment starts on an IOVA page boundary in __finalise_sg()
1263 * - but doesn't fall at a segment boundary in __finalise_sg()
1264 * - and wouldn't make the resulting output segment too long in __finalise_sg()
1267 (max_len - cur_len >= s_length)) { in __finalise_sg()
1303 s->offset += sg_dma_address(s); in __invalidate_sg()
1305 s->length = sg_dma_len(s); in __invalidate_sg()
1333 s->offset, s->length, dir, attrs); in iommu_dma_map_sg_swiotlb()
1336 sg_dma_len(s) = s->length; in iommu_dma_map_sg_swiotlb()
1343 return -EIO; in iommu_dma_map_sg_swiotlb()
1348 * any old buffer layout, but the IOMMU API requires everything to be
1349 * aligned to IOMMU pages. Hence the need for this complicated bit of
1350 * impedance-matching, to be able to hand off a suitably-aligned list,
1357 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
1358 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
1362 enum pci_p2pdma_map_type map; in iommu_dma_map_sg() local
1365 unsigned long mask = dma_get_seg_boundary(dev); in iommu_dma_map_sg() local
1383 * IOVA granules for the IOMMU driver to handle. With some clever in iommu_dma_map_sg()
1384 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
1385 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
1388 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
1389 size_t s_length = s->length; in iommu_dma_map_sg()
1390 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
1393 map = pci_p2pdma_map_segment(&p2pdma_state, dev, s); in iommu_dma_map_sg()
1394 switch (map) { in iommu_dma_map_sg()
1411 ret = -EREMOTEIO; in iommu_dma_map_sg()
1418 s->offset -= s_iova_off; in iommu_dma_map_sg()
1420 s->length = s_length; in iommu_dma_map_sg()
1424 * depend on these assumptions about the segment boundary mask: in iommu_dma_map_sg()
1425 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
1427 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
1431 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
1435 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
1436 prev->length += pad_len; in iommu_dma_map_sg()
1449 ret = -ENOMEM; in iommu_dma_map_sg()
1454 * We'll leave any physical concatenation to the IOMMU driver's in iommu_dma_map_sg()
1455 * implementation - it knows better than we do. in iommu_dma_map_sg()
1468 if (ret != -ENOMEM && ret != -EREMOTEIO) in iommu_dma_map_sg()
1469 return -EINVAL; in iommu_dma_map_sg()
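The pad_len computation in iommu_dma_map_sg() above bumps the running IOVA length so that each scatterlist segment starts on a (segment boundary mask + 1) boundary whenever it could otherwise straddle one. A small standalone check of that arithmetic with an example 64 KiB boundary mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = 0xffff;         /* 64 KiB segment boundary (example) */
        uint64_t iova_len = 0x9000;     /* total IOVA length so far */
        uint64_t s_length = 0xa000;     /* length of the next segment */

        uint64_t pad_len = (mask - iova_len + 1) & mask;
        if (pad_len && pad_len < s_length - 1)
                iova_len += pad_len;    /* next segment now starts at 0x10000 */

        printf("pad_len %#llx, next segment IOVA offset %#llx\n",
               (unsigned long long)pad_len, (unsigned long long)iova_len);
        return 0;
}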
1506 nents -= i; in iommu_dma_unmap_sg()
1520 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
1543 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
1550 * If the address is remapped, then it's either non-coherent in __iommu_dma_free()
1637 dev->coherent_dma_mask); in iommu_dma_alloc()
1651 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1654 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1659 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1660 return -ENXIO; in iommu_dma_mmap()
1672 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1673 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1674 vma->vm_page_prot); in iommu_dma_mmap()
1700 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1708 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
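The merge boundary returned above is one less than the smallest page size the IOMMU supports: adjacent physical chunks may only be merged when they meet on such a page boundary. A one-line model of that calculation (the pgsize_bitmap value is an example):

#include <stdio.h>

int main(void)
{
        unsigned long pgsize_bitmap = 0x40201000UL;     /* 4K | 2M | 1G (example) */
        unsigned long merge_boundary =
                (1UL << __builtin_ctzl(pgsize_bitmap)) - 1;

        printf("merge boundary mask %#lx\n", merge_boundary);   /* prints 0xfff */
        return 0;
}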
1729 dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; in iommu_setup_dma_ops()
1731 dev->dma_iommu = iommu_is_dma_domain(domain); in iommu_setup_dma_ops()
1732 if (dev->dma_iommu && iommu_dma_init_domain(domain, dev)) in iommu_setup_dma_ops()
1737 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in iommu_setup_dma_ops()
1739 dev->dma_iommu = false; in iommu_setup_dma_ops()
1744 return domain && (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA || in has_msi_cookie()
1745 domain->cookie_type == IOMMU_COOKIE_DMA_MSI); in has_msi_cookie()
1750 switch (domain->cookie_type) { in cookie_msi_granule()
1752 return domain->iova_cookie->iovad.granule; in cookie_msi_granule()
1762 switch (domain->cookie_type) { in cookie_msi_pages()
1764 return &domain->iova_cookie->msi_page_list; in cookie_msi_pages()
1766 return &domain->msi_cookie->msi_page_list; in cookie_msi_pages()
1781 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
1783 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
1797 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
1798 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
1799 msi_page->iova = iova; in iommu_dma_get_msi_page()
1800 list_add(&msi_page->list, msi_page_list); in iommu_dma_get_msi_page()
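iommu_dma_get_msi_page() masks the doorbell address down to the IOVA granule so a single mapped page can serve every MSI whose doorbell falls within it. A tiny model of that rounding, assuming a 4 KiB granule and an arbitrary doorbell address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t granule = 0x1000, msi_addr = 0x8090040;        /* examples */

        msi_addr &= ~(granule - 1);     /* round down to the granule */
        printf("doorbell page mapped at %#llx\n", (unsigned long long)msi_addr);
        return 0;
}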
1824 return -ENOMEM; in iommu_dma_sw_msi()
1826 msi_desc_set_iommu_msi_iova(desc, msi_page->iova, in iommu_dma_sw_msi()