Lines Matching +full:page +full:-size (from linux/arch/arm/mm/dma-mapping.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
33 #include <asm/dma-iommu.h>
36 #include <xen/swiotlb-xen.h>
43 size_t size; member
53 size_t size; member
55 struct page *page; member
64 struct page **ret_page);
84 if (buf->virt == virt) { in arm_dma_buffer_find()
85 list_del(&buf->list); in arm_dma_buffer_find()
106 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
108 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
112 * arm_dma_map_page - map a portion of a page for streaming DMA
113 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
114 * @page: page that buffer resides in
115 * @offset: offset into page for start of buffer
116 * @size: size of buffer to map
125 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, in arm_dma_map_page() argument
126 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_dma_map_page() argument
130 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
131 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page()
134 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, in arm_coherent_dma_map_page() argument
135 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_dma_map_page() argument
138 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page()
142 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
143 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
145 * @size: size of buffer (same as passed to dma_map_page)
148 * Unmap a page streaming mode DMA translation. The handle and size
156 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_dma_unmap_page() argument
160 handle & ~PAGE_MASK, size, dir); in arm_dma_unmap_page()
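For context, this is roughly how a driver exercises the streaming path that lands in arm_dma_map_page()/arm_dma_unmap_page(); a minimal sketch with a hypothetical TX helper (not from this file):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: stream one region of a page to a device for TX. */
static int example_tx_page(struct device *dev, struct page *page,
                           unsigned long offset, size_t len)
{
    dma_addr_t addr;

    /* CPU is done writing; clean caches and hand ownership to the device. */
    addr = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))
        return -ENOMEM;

    /* ... program 'addr' into a descriptor and kick off the transfer ... */

    /* After the completion interrupt: give the buffer back to the CPU. */
    dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
    return 0;
}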
164 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_cpu() argument
166 unsigned int offset = handle & (PAGE_SIZE - 1); in arm_dma_sync_single_for_cpu()
167 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu() local
168 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
172 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_device() argument
174 unsigned int offset = handle & (PAGE_SIZE - 1); in arm_dma_sync_single_for_device()
175 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device() local
176 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_sync_single_for_device()
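When a streaming mapping is kept alive across transfers, ownership is bounced with the sync calls that end up in the two helpers above; a hedged sketch of a hypothetical RX polling routine:

#include <linux/dma-mapping.h>

/* Hypothetical: one long-lived DMA_FROM_DEVICE mapping reused per packet. */
static void example_rx_poll(struct device *dev, dma_addr_t addr,
                            void *cpu_buf, size_t len)
{
    /* Invalidate caches so the CPU sees what the device just wrote. */
    dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

    /* ... parse cpu_buf here ... */

    /* Return ownership to the device for the next transfer. */
    dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}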
181 * properly. For example, if your device can only drive the low 24-bits
187 unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit); in arm_dma_supported()
191 * PFN number includes the page which we can DMA to. in arm_dma_supported()
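The mask check above is driven by what the driver declares at probe time; a minimal sketch for the 24-bit case mentioned in the comment (the probe function is hypothetical):

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
    /* Device can only drive the low 24 address bits during bus mastering,
     * so refuse to bind if the platform cannot satisfy that. */
    return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
}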
217 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
219 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
222 void *cpu_addr, dma_addr_t dma_addr, size_t size,
240 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) in __dma_clear_buffer() argument
244 * lurking in the kernel direct-mapped region is invalidated. in __dma_clear_buffer()
246 if (PageHighMem(page)) { in __dma_clear_buffer()
247 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer()
248 phys_addr_t end = base + size; in __dma_clear_buffer()
249 while (size > 0) { in __dma_clear_buffer()
250 void *ptr = kmap_atomic(page); in __dma_clear_buffer()
255 page++; in __dma_clear_buffer()
256 size -= PAGE_SIZE; in __dma_clear_buffer()
261 void *ptr = page_address(page); in __dma_clear_buffer()
262 memset(ptr, 0, size); in __dma_clear_buffer()
264 dmac_flush_range(ptr, ptr + size); in __dma_clear_buffer()
265 outer_flush_range(__pa(ptr), __pa(ptr) + size); in __dma_clear_buffer()
271 * Allocate a DMA buffer for 'dev' of size 'size' using the
272 * specified gfp mask. Note that 'size' must be page aligned.
274 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, in __dma_alloc_buffer() argument
277 unsigned long order = get_order(size); in __dma_alloc_buffer()
278 struct page *page, *p, *e; in __dma_alloc_buffer() local
280 page = alloc_pages(gfp, order); in __dma_alloc_buffer()
281 if (!page) in __dma_alloc_buffer()
285 * Now split the huge page and free the excess pages in __dma_alloc_buffer()
287 split_page(page, order); in __dma_alloc_buffer()
288 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
291 __dma_clear_buffer(page, size, coherent_flag); in __dma_alloc_buffer()
293 return page; in __dma_alloc_buffer()
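The allocate-then-trim idiom used by __dma_alloc_buffer() (round up to a power-of-two order, split_page(), free the tail) also works stand-alone; a hedged sketch under the same page-alignment assumption:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 'size' bytes of contiguous pages without keeping the slack
 * that a bare alloc_pages(order) would leave past the requested size. */
static struct page *example_alloc_trimmed(size_t size, gfp_t gfp)
{
    unsigned int order = get_order(size);
    struct page *page, *p, *e;

    page = alloc_pages(gfp, order);
    if (!page)
        return NULL;

    /* Split the order-N block into 2^N individually refcounted pages ... */
    split_page(page, order);

    /* ... and give the pages beyond 'size' straight back. */
    for (p = page + (PAGE_ALIGN(size) >> PAGE_SHIFT),
         e = page + (1UL << order); p < e; p++)
        __free_page(p);

    return page;
}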
297 * Free a DMA buffer. 'size' must be page aligned.
299 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
301 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
303 while (page < e) { in __dma_free_buffer()
304 __free_page(page); in __dma_free_buffer()
305 page++; in __dma_free_buffer()
309 static void *__alloc_from_contiguous(struct device *dev, size_t size,
310 pgprot_t prot, struct page **ret_page,
314 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
315 pgprot_t prot, struct page **ret_page,
337 struct page *page; in atomic_pool_init() local
340 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
344 * The atomic pool is only used for non-coherent allocations in atomic_pool_init()
349 &page, atomic_pool_init, true, NORMAL, in atomic_pool_init()
353 &page, atomic_pool_init, true); in atomic_pool_init()
358 page_to_phys(page), in atomic_pool_init()
359 atomic_pool_size, -1); in atomic_pool_init()
377 return -ENOMEM; in atomic_pool_init()
386 unsigned long size; member
393 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
396 dma_mmu_remap[dma_mmu_remap_num].size = size; in dma_contiguous_early_fixup()
405 phys_addr_t end = start + dma_mmu_remap[i].size; in dma_contiguous_remap()
416 map.length = end - start; in dma_contiguous_remap()
420 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
441 struct page *page = virt_to_page(addr); in __dma_update_pte() local
444 set_pte_ext(pte, mk_pte(page, prot), 0); in __dma_update_pte()
448 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
450 unsigned long start = (unsigned long) page_address(page); in __dma_remap()
451 unsigned end = start + size; in __dma_remap()
453 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); in __dma_remap()
457 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_remap_buffer() argument
458 pgprot_t prot, struct page **ret_page, in __alloc_remap_buffer()
461 struct page *page; in __alloc_remap_buffer() local
465 * non-coherent in __alloc_remap_buffer()
467 page = __dma_alloc_buffer(dev, size, gfp, NORMAL); in __alloc_remap_buffer()
468 if (!page) in __alloc_remap_buffer()
473 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_remap_buffer()
475 __dma_free_buffer(page, size); in __alloc_remap_buffer()
480 *ret_page = page; in __alloc_remap_buffer()
484 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool() argument
494 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
505 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
507 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
510 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
512 if (!__in_atomic_pool(start, size)) in __free_from_pool()
515 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
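The atomic pool above is built on genalloc; in isolation the create/add/alloc/free pattern looks roughly like this (a sketch, assuming the caller already has a mapped, physically contiguous buffer; all names here are hypothetical):

#include <linux/genalloc.h>

static struct gen_pool *example_pool;

static int example_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
    int ret;

    /* Page-granular allocations, no NUMA node preference. */
    example_pool = gen_pool_create(PAGE_SHIFT, -1);
    if (!example_pool)
        return -ENOMEM;

    ret = gen_pool_add_virt(example_pool, (unsigned long)vaddr, phys,
                            size, -1);
    if (ret) {
        gen_pool_destroy(example_pool);
        example_pool = NULL;
    }
    return ret;
}

static void *example_pool_alloc(size_t size)
{
    return (void *)gen_pool_alloc(example_pool, size);
}

static void example_pool_free(void *vaddr, size_t size)
{
    gen_pool_free(example_pool, (unsigned long)vaddr, size);
}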
520 static void *__alloc_from_contiguous(struct device *dev, size_t size, in __alloc_from_contiguous() argument
521 pgprot_t prot, struct page **ret_page, in __alloc_from_contiguous()
525 unsigned long order = get_order(size); in __alloc_from_contiguous()
526 size_t count = size >> PAGE_SHIFT; in __alloc_from_contiguous()
527 struct page *page; in __alloc_from_contiguous() local
530 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN); in __alloc_from_contiguous()
531 if (!page) in __alloc_from_contiguous()
534 __dma_clear_buffer(page, size, coherent_flag); in __alloc_from_contiguous()
539 if (PageHighMem(page)) { in __alloc_from_contiguous()
540 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_from_contiguous()
542 dma_release_from_contiguous(dev, page, count); in __alloc_from_contiguous()
546 __dma_remap(page, size, prot); in __alloc_from_contiguous()
547 ptr = page_address(page); in __alloc_from_contiguous()
551 *ret_page = page; in __alloc_from_contiguous()
555 static void __free_from_contiguous(struct device *dev, struct page *page, in __free_from_contiguous() argument
556 void *cpu_addr, size_t size, bool want_vaddr) in __free_from_contiguous() argument
559 if (PageHighMem(page)) in __free_from_contiguous()
560 dma_common_free_remap(cpu_addr, size); in __free_from_contiguous()
562 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
564 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
575 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_simple_buffer() argument
576 struct page **ret_page) in __alloc_simple_buffer()
578 struct page *page; in __alloc_simple_buffer() local
580 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
581 if (!page) in __alloc_simple_buffer()
584 *ret_page = page; in __alloc_simple_buffer()
585 return page_address(page); in __alloc_simple_buffer()
589 struct page **ret_page) in simple_allocator_alloc()
591 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
597 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
606 struct page **ret_page) in cma_allocator_alloc()
608 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
609 ret_page, args->caller, in cma_allocator_alloc()
610 args->want_vaddr, args->coherent_flag, in cma_allocator_alloc()
611 args->gfp); in cma_allocator_alloc()
616 __free_from_contiguous(args->dev, args->page, args->cpu_addr, in cma_allocator_free()
617 args->size, args->want_vaddr); in cma_allocator_free()
626 struct page **ret_page) in pool_allocator_alloc()
628 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
633 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
642 struct page **ret_page) in remap_allocator_alloc()
644 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
645 args->prot, ret_page, args->caller, in remap_allocator_alloc()
646 args->want_vaddr); in remap_allocator_alloc()
651 if (args->want_vaddr) in remap_allocator_free()
652 dma_common_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
654 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
662 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in __dma_alloc() argument
666 u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in __dma_alloc()
667 struct page *page = NULL; in __dma_alloc() local
673 .size = PAGE_ALIGN(size), in __dma_alloc()
683 if (limit && size >= limit) { in __dma_alloc()
685 size, mask); in __dma_alloc()
699 * Following is a work-around (a.k.a. hack) to prevent pages in __dma_alloc()
713 buf->allocator = &cma_allocator; in __dma_alloc()
715 buf->allocator = &simple_allocator; in __dma_alloc()
717 buf->allocator = &remap_allocator; in __dma_alloc()
719 buf->allocator = &pool_allocator; in __dma_alloc()
721 addr = buf->allocator->alloc(&args, &page); in __dma_alloc()
723 if (page) { in __dma_alloc()
726 *handle = pfn_to_dma(dev, page_to_pfn(page)); in __dma_alloc()
727 buf->virt = args.want_vaddr ? addr : page; in __dma_alloc()
730 list_add(&buf->list, &arm_dma_bufs); in __dma_alloc()
736 return args.want_vaddr ? addr : page; in __dma_alloc()
740 * Allocate DMA-coherent memory space and return both the kernel remapped
743 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in arm_dma_alloc() argument
748 return __dma_alloc(dev, size, handle, gfp, prot, false, in arm_dma_alloc()
752 static void *arm_coherent_dma_alloc(struct device *dev, size_t size, in arm_coherent_dma_alloc() argument
755 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, in arm_coherent_dma_alloc()
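From a driver's point of view these allocators sit behind dma_alloc_coherent(); a hedged usage sketch with a hypothetical descriptor ring:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

#define EXAMPLE_RING_BYTES  SZ_64K    /* hypothetical ring size */

static int example_alloc_ring(struct device *dev, void **ring,
                              dma_addr_t *ring_dma)
{
    /* Returns a CPU virtual address plus the bus address to program
     * into the device; the memory can be shared without explicit syncs. */
    *ring = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, ring_dma, GFP_KERNEL);
    return *ring ? 0 : -ENOMEM;
}

static void example_free_ring(struct device *dev, void *ring,
                              dma_addr_t ring_dma)
{
    dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
}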
760 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __arm_dma_mmap() argument
763 int ret = -ENXIO; in __arm_dma_mmap()
765 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __arm_dma_mmap()
767 unsigned long off = vma->vm_pgoff; in __arm_dma_mmap()
769 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in __arm_dma_mmap()
772 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { in __arm_dma_mmap()
773 ret = remap_pfn_range(vma, vma->vm_start, in __arm_dma_mmap()
775 vma->vm_end - vma->vm_start, in __arm_dma_mmap()
776 vma->vm_page_prot); in __arm_dma_mmap()
783 * Create userspace mapping for the DMA-coherent memory.
786 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_coherent_dma_mmap() argument
789 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_coherent_dma_mmap()
793 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_dma_mmap() argument
796 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_dma_mmap()
797 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_dma_mmap()
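A driver typically reaches arm_dma_mmap() through dma_mmap_coherent() from its own mmap handler; a minimal sketch, with a hypothetical private-data struct:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct example_dev {            /* hypothetical driver state */
    struct device *dev;
    void *buf_cpu;
    dma_addr_t buf_dma;
    size_t buf_size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct example_dev *ed = file->private_data;

    /* Export the buffer previously obtained from dma_alloc_coherent(). */
    return dma_mmap_coherent(ed->dev, vma, ed->buf_cpu, ed->buf_dma,
                             ed->buf_size);
}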
803 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in __arm_dma_free() argument
807 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); in __arm_dma_free() local
811 .size = PAGE_ALIGN(size), in __arm_dma_free()
813 .page = page, in __arm_dma_free()
821 buf->allocator->free(&args); in __arm_dma_free()
825 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_dma_free() argument
828 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); in arm_dma_free()
831 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_coherent_dma_free() argument
834 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); in arm_coherent_dma_free()
838 void *cpu_addr, dma_addr_t handle, size_t size, in arm_dma_get_sgtable() argument
842 struct page *page; in arm_dma_get_sgtable() local
845 /* If the PFN is not valid, we do not have a struct page */ in arm_dma_get_sgtable()
847 return -ENXIO; in arm_dma_get_sgtable()
849 page = pfn_to_page(pfn); in arm_dma_get_sgtable()
855 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
859 static void dma_cache_maint_page(struct page *page, unsigned long offset, in dma_cache_maint_page() argument
860 size_t size, enum dma_data_direction dir, in dma_cache_maint_page() argument
864 size_t left = size; in dma_cache_maint_page()
866 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page()
879 page = pfn_to_page(pfn); in dma_cache_maint_page()
881 if (PageHighMem(page)) { in dma_cache_maint_page()
883 len = PAGE_SIZE - offset; in dma_cache_maint_page()
886 vaddr = kmap_atomic(page); in dma_cache_maint_page()
890 vaddr = kmap_high_get(page); in dma_cache_maint_page()
893 kunmap_high(page); in dma_cache_maint_page()
897 vaddr = page_address(page) + offset; in dma_cache_maint_page()
902 left -= len; in dma_cache_maint_page()
910 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
912 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, in __dma_page_cpu_to_dev() argument
913 size_t size, enum dma_data_direction dir) in __dma_page_cpu_to_dev() argument
917 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
919 paddr = page_to_phys(page) + off; in __dma_page_cpu_to_dev()
921 outer_inv_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
923 outer_clean_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
925 /* FIXME: non-speculating: flush on bidirectional mappings? */ in __dma_page_cpu_to_dev()
928 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, in __dma_page_dev_to_cpu() argument
929 size_t size, enum dma_data_direction dir) in __dma_page_dev_to_cpu() argument
931 phys_addr_t paddr = page_to_phys(page) + off; in __dma_page_dev_to_cpu()
933 /* FIXME: non-speculating: not required */ in __dma_page_dev_to_cpu()
936 outer_inv_range(paddr, paddr + size); in __dma_page_dev_to_cpu()
938 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
942 * Mark the D-cache clean for these pages to avoid extra flushing. in __dma_page_dev_to_cpu()
944 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { in __dma_page_dev_to_cpu()
946 size_t left = size; in __dma_page_dev_to_cpu()
948 pfn = page_to_pfn(page) + off / PAGE_SIZE; in __dma_page_dev_to_cpu()
952 left -= PAGE_SIZE - off; in __dma_page_dev_to_cpu()
955 page = pfn_to_page(pfn++); in __dma_page_dev_to_cpu()
956 set_bit(PG_dcache_clean, &page->flags); in __dma_page_dev_to_cpu()
957 left -= PAGE_SIZE; in __dma_page_dev_to_cpu()
963 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
964 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
970 * This is the scatter-gather version of the dma_map_single interface.
987 s->dma_length = s->length; in arm_dma_map_sg()
989 s->dma_address = ops->map_page(dev, sg_page(s), s->offset, in arm_dma_map_sg()
990 s->length, dir, attrs); in arm_dma_map_sg()
991 if (dma_mapping_error(dev, s->dma_address)) in arm_dma_map_sg()
998 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); in arm_dma_map_sg()
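The scatter-gather entry points above are reached through dma_map_sg()/dma_unmap_sg(); a hedged sketch of the usual driver loop (device programming elided):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
    struct scatterlist *sg;
    int i, mapped;

    mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (!mapped)
        return -ENOMEM;

    /* Iterate over what came back: entries may have been coalesced. */
    for_each_sg(sgl, sg, mapped, i) {
        /* program sg_dma_address(sg) / sg_dma_len(sg) into the device */
    }

    /* After completion, unmap with the original nents, not 'mapped'. */
    dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
    return 0;
}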
1003 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1004 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1021 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); in arm_dma_unmap_sg()
1026 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1039 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, in arm_dma_sync_sg_for_cpu()
1045 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1058 ops->sync_single_for_device(dev, sg_dma_address(s), s->length, in arm_dma_sync_sg_for_device()
1066 * 32-bits, which then can't be addressed by devices that only support in arm_get_dma_map_ops()
1067 * 32-bit DMA. in arm_get_dma_map_ops()
1068 * Use the generic dma-direct / swiotlb ops code in that case, as that in arm_get_dma_map_ops()
1102 size_t size) in __alloc_iova() argument
1104 unsigned int order = get_order(size); in __alloc_iova()
1107 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
1115 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __alloc_iova()
1116 align = (1 << order) - 1; in __alloc_iova()
1118 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
1119 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
1120 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1121 mapping->bits, 0, count, align); in __alloc_iova()
1123 if (start > mapping->bits) in __alloc_iova()
1126 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1133 * address range of size bytes. in __alloc_iova()
1135 if (i == mapping->nr_bitmaps) { in __alloc_iova()
1137 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1141 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1142 mapping->bits, 0, count, align); in __alloc_iova()
1144 if (start > mapping->bits) { in __alloc_iova()
1145 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1149 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1151 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1153 iova = mapping->base + (mapping_size * i); in __alloc_iova()
1160 dma_addr_t addr, size_t size) in __free_iova() argument
1163 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __free_iova()
1168 if (!size) in __free_iova()
1171 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; in __free_iova()
1172 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
1174 bitmap_base = mapping->base + mapping_size * bitmap_index; in __free_iova()
1176 start = (addr - bitmap_base) >> PAGE_SHIFT; in __free_iova()
1178 if (addr + size > bitmap_base + mapping_size) { in __free_iova()
1187 count = size >> PAGE_SHIFT; in __free_iova()
1189 spin_lock_irqsave(&mapping->lock, flags); in __free_iova()
1190 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); in __free_iova()
1191 spin_unlock_irqrestore(&mapping->lock, flags); in __free_iova()
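__alloc_iova()/__free_iova() above are a bitmap allocator; the core find/set/clear pattern, stripped of the extension and locking machinery, looks like this (a hedged sketch; names are hypothetical):

#include <linux/bitmap.h>
#include <linux/dma-mapping.h>

/* One bit per IOVA page; 'bits' is the number of pages in the window. */
static dma_addr_t example_alloc_iova(unsigned long *bitmap, unsigned int bits,
                                     dma_addr_t base, size_t size)
{
    unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
    unsigned long start;

    start = bitmap_find_next_zero_area(bitmap, bits, 0, count, 0);
    if (start > bits)
        return DMA_MAPPING_ERROR;

    bitmap_set(bitmap, start, count);
    return base + ((dma_addr_t)start << PAGE_SHIFT);
}

static void example_free_iova(unsigned long *bitmap, dma_addr_t base,
                              dma_addr_t addr, size_t size)
{
    bitmap_clear(bitmap, (addr - base) >> PAGE_SHIFT,
                 PAGE_ALIGN(size) >> PAGE_SHIFT);
}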
1197 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer() argument
1201 struct page **pages; in __iommu_alloc_buffer()
1202 int count = size >> PAGE_SHIFT; in __iommu_alloc_buffer()
1203 int array_size = count * sizeof(struct page *); in __iommu_alloc_buffer()
1216 unsigned long order = get_order(size); in __iommu_alloc_buffer()
1217 struct page *page; in __iommu_alloc_buffer() local
1219 page = dma_alloc_from_contiguous(dev, count, order, in __iommu_alloc_buffer()
1221 if (!page) in __iommu_alloc_buffer()
1224 __dma_clear_buffer(page, size, coherent_flag); in __iommu_alloc_buffer()
1227 pages[i] = page + i; in __iommu_alloc_buffer()
1234 order_idx = ARRAY_SIZE(iommu_order_array) - 1; in __iommu_alloc_buffer()
1253 /* See if it's easy to allocate a high-order chunk */ in __iommu_alloc_buffer()
1270 while (--j) in __iommu_alloc_buffer()
1276 count -= 1 << order; in __iommu_alloc_buffer()
1281 while (i--) in __iommu_alloc_buffer()
1288 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer()
1289 size_t size, unsigned long attrs) in __iommu_free_buffer() argument
1291 int count = size >> PAGE_SHIFT; in __iommu_free_buffer()
1310 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, in __iommu_create_mapping() argument
1314 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_create_mapping()
1318 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
1334 len = (j - i) << PAGE_SHIFT; in __iommu_create_mapping()
1335 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
1344 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1345 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1349 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
1354 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1355 * result to page size in __iommu_remove_mapping()
1357 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); in __iommu_remove_mapping()
1360 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1361 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
1365 static struct page **__atomic_get_pages(void *addr) in __atomic_get_pages()
1367 struct page *page; in __atomic_get_pages() local
1371 page = phys_to_page(phys); in __atomic_get_pages()
1373 return (struct page **)page; in __atomic_get_pages()
1376 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) in __iommu_get_pages()
1387 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, in __iommu_alloc_simple() argument
1391 struct page *page; in __iommu_alloc_simple() local
1395 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __iommu_alloc_simple()
1397 addr = __alloc_from_pool(size, &page); in __iommu_alloc_simple()
1401 *handle = __iommu_create_mapping(dev, &page, size, attrs); in __iommu_alloc_simple()
1408 __free_from_pool(addr, size); in __iommu_alloc_simple()
1413 dma_addr_t handle, size_t size, int coherent_flag) in __iommu_free_atomic() argument
1415 __iommu_remove_mapping(dev, handle, size); in __iommu_free_atomic()
1417 __dma_free_buffer(virt_to_page(cpu_addr), size); in __iommu_free_atomic()
1419 __free_from_pool(cpu_addr, size); in __iommu_free_atomic()
1422 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, in __arm_iommu_alloc_attrs() argument
1427 struct page **pages; in __arm_iommu_alloc_attrs()
1431 size = PAGE_ALIGN(size); in __arm_iommu_alloc_attrs()
1434 return __iommu_alloc_simple(dev, size, gfp, handle, in __arm_iommu_alloc_attrs()
1438 * Following is a work-around (a.k.a. hack) to prevent pages in __arm_iommu_alloc_attrs()
1446 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); in __arm_iommu_alloc_attrs()
1450 *handle = __iommu_create_mapping(dev, pages, size, attrs); in __arm_iommu_alloc_attrs()
1457 addr = dma_common_pages_remap(pages, size, prot, in __arm_iommu_alloc_attrs()
1465 __iommu_remove_mapping(dev, *handle, size); in __arm_iommu_alloc_attrs()
1467 __iommu_free_buffer(dev, pages, size, attrs); in __arm_iommu_alloc_attrs()
1471 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, in arm_iommu_alloc_attrs() argument
1474 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); in arm_iommu_alloc_attrs()
1477 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, in arm_coherent_iommu_alloc_attrs() argument
1480 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); in arm_coherent_iommu_alloc_attrs()
1484 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __arm_iommu_mmap_attrs() argument
1487 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in __arm_iommu_mmap_attrs()
1488 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __arm_iommu_mmap_attrs()
1492 return -ENXIO; in __arm_iommu_mmap_attrs()
1494 if (vma->vm_pgoff >= nr_pages) in __arm_iommu_mmap_attrs()
1495 return -ENXIO; in __arm_iommu_mmap_attrs()
1505 dma_addr_t dma_addr, size_t size, unsigned long attrs) in arm_iommu_mmap_attrs() argument
1507 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_iommu_mmap_attrs()
1509 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_iommu_mmap_attrs()
1514 dma_addr_t dma_addr, size_t size, unsigned long attrs) in arm_coherent_iommu_mmap_attrs() argument
1516 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_coherent_iommu_mmap_attrs()
1520 * free a page as defined by the above mapping.
1523 static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in __arm_iommu_free_attrs() argument
1526 struct page **pages; in __arm_iommu_free_attrs()
1527 size = PAGE_ALIGN(size); in __arm_iommu_free_attrs()
1529 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { in __arm_iommu_free_attrs()
1530 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); in __arm_iommu_free_attrs()
1541 dma_common_free_remap(cpu_addr, size); in __arm_iommu_free_attrs()
1543 __iommu_remove_mapping(dev, handle, size); in __arm_iommu_free_attrs()
1544 __iommu_free_buffer(dev, pages, size, attrs); in __arm_iommu_free_attrs()
1547 static void arm_iommu_free_attrs(struct device *dev, size_t size, in arm_iommu_free_attrs() argument
1551 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); in arm_iommu_free_attrs()
1554 static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, in arm_coherent_iommu_free_attrs() argument
1557 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); in arm_coherent_iommu_free_attrs()
1562 size_t size, unsigned long attrs) in arm_iommu_get_sgtable() argument
1564 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_get_sgtable()
1565 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable()
1568 return -ENXIO; in arm_iommu_get_sgtable()
1570 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()
1575 * Map a part of the scatter-gather list into contiguous io address space
1578 size_t size, dma_addr_t *handle, in __map_sg_chunk() argument
1589 size = PAGE_ALIGN(size); in __map_sg_chunk()
1592 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1594 return -ENOMEM; in __map_sg_chunk()
1596 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { in __map_sg_chunk()
1598 unsigned int len = PAGE_ALIGN(s->offset + s->length); in __map_sg_chunk()
1601 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in __map_sg_chunk()
1605 ret = iommu_map(mapping->domain, iova, phys, len, prot); in __map_sg_chunk()
1615 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); in __map_sg_chunk()
1616 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1626 unsigned int offset = s->offset; in __iommu_map_sg()
1627 unsigned int size = s->offset + s->length; in __iommu_map_sg() local
1633 s->dma_address = DMA_MAPPING_ERROR; in __iommu_map_sg()
1634 s->dma_length = 0; in __iommu_map_sg()
1636 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in __iommu_map_sg()
1637 if (__map_sg_chunk(dev, start, size, &dma->dma_address, in __iommu_map_sg()
1641 dma->dma_address += offset; in __iommu_map_sg()
1642 dma->dma_length = size - offset; in __iommu_map_sg()
1644 size = offset = s->offset; in __iommu_map_sg()
1649 size += s->length; in __iommu_map_sg()
1651 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, in __iommu_map_sg()
1655 dma->dma_address += offset; in __iommu_map_sg()
1656 dma->dma_length = size - offset; in __iommu_map_sg()
1667 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1685 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1714 __dma_page_dev_to_cpu(sg_page(s), s->offset, in __iommu_unmap_sg()
1715 s->length, dir); in __iommu_unmap_sg()
1720 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1737 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1769 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_cpu()
1788 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_device()
1795 * @page: page that buffer resides in
1796 * @offset: offset into page for start of buffer
1797 * @size: size of buffer to map
1802 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, in arm_coherent_iommu_map_page() argument
1803 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_iommu_map_page() argument
1808 int ret, prot, len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_map_page()
1816 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); in arm_coherent_iommu_map_page()
1829 * @page: page that buffer resides in
1830 * @offset: offset into page for start of buffer
1831 * @size: size of buffer to map
1836 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, in arm_iommu_map_page() argument
1837 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_iommu_map_page() argument
1841 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1843 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); in arm_iommu_map_page()
1850 * @size: size of buffer (same as passed to dma_map_page)
1856 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_coherent_iommu_unmap_page() argument
1861 int len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_unmap_page()
1866 iommu_unmap(mapping->domain, iova, len); in arm_coherent_iommu_unmap_page()
1874 * @size: size of buffer (same as passed to dma_map_page)
1880 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_iommu_unmap_page() argument
1884 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page() local
1886 int len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_page()
1892 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1894 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_page()
1899 * arm_iommu_map_resource - map a device resource for DMA
1902 * @size: size of resource to map
1906 phys_addr_t phys_addr, size_t size, in arm_iommu_map_resource() argument
1914 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_map_resource()
1922 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); in arm_iommu_map_resource()
1933 * arm_iommu_unmap_resource - unmap a device DMA resource
1936  * @size: size of resource to unmap (same as passed to arm_iommu_map_resource) in arm_iommu_unmap_resource()
1940 size_t size, enum dma_data_direction dir, in arm_iommu_unmap_resource() argument
1946 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_resource()
1951 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_resource()
1956 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_cpu() argument
1960 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu() local
1966 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1970 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_device() argument
1974 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device() local
1980 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
2027 * @size: maximum size of the valid IO address space
2037 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) in arm_iommu_create_mapping() argument
2039 unsigned int bits = size >> PAGE_SHIFT; in arm_iommu_create_mapping()
2043 int err = -ENOMEM; in arm_iommu_create_mapping()
2045 /* currently only 32-bit DMA address space is supported */ in arm_iommu_create_mapping()
2046 if (size > DMA_BIT_MASK(32) + 1) in arm_iommu_create_mapping()
2047 return ERR_PTR(-ERANGE); in arm_iommu_create_mapping()
2050 return ERR_PTR(-EINVAL); in arm_iommu_create_mapping()
2061 mapping->bitmap_size = bitmap_size; in arm_iommu_create_mapping()
2062 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), in arm_iommu_create_mapping()
2064 if (!mapping->bitmaps) in arm_iommu_create_mapping()
2067 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); in arm_iommu_create_mapping()
2068 if (!mapping->bitmaps[0]) in arm_iommu_create_mapping()
2071 mapping->nr_bitmaps = 1; in arm_iommu_create_mapping()
2072 mapping->extensions = extensions; in arm_iommu_create_mapping()
2073 mapping->base = base; in arm_iommu_create_mapping()
2074 mapping->bits = BITS_PER_BYTE * bitmap_size; in arm_iommu_create_mapping()
2076 spin_lock_init(&mapping->lock); in arm_iommu_create_mapping()
2078 mapping->domain = iommu_domain_alloc(bus); in arm_iommu_create_mapping()
2079 if (!mapping->domain) in arm_iommu_create_mapping()
2082 kref_init(&mapping->kref); in arm_iommu_create_mapping()
2085 kfree(mapping->bitmaps[0]); in arm_iommu_create_mapping()
2087 kfree(mapping->bitmaps); in arm_iommu_create_mapping()
2101 iommu_domain_free(mapping->domain); in release_iommu_mapping()
2102 for (i = 0; i < mapping->nr_bitmaps; i++) in release_iommu_mapping()
2103 kfree(mapping->bitmaps[i]); in release_iommu_mapping()
2104 kfree(mapping->bitmaps); in release_iommu_mapping()
2112 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
2113 return -EINVAL; in extend_iommu_mapping()
2115 next_bitmap = mapping->nr_bitmaps; in extend_iommu_mapping()
2116 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
2118 if (!mapping->bitmaps[next_bitmap]) in extend_iommu_mapping()
2119 return -ENOMEM; in extend_iommu_mapping()
2121 mapping->nr_bitmaps++; in extend_iommu_mapping()
2129 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_release_mapping()
2138 err = iommu_attach_device(mapping->domain, dev); in __arm_iommu_attach_device()
2142 kref_get(&mapping->kref); in __arm_iommu_attach_device()
2181 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
2193 iommu_detach_device(mapping->domain, dev); in arm_iommu_detach_device()
2194 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_detach_device()
2196 set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); in arm_iommu_detach_device()
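The exported mapping API in this block is what bus glue or platform code calls; a hedged lifecycle sketch (the 64 MiB window at IOVA 0 is an arbitrary example):

#include <asm/dma-iommu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>

static int example_attach_iommu(struct device *dev,
                                struct dma_iommu_mapping **out)
{
    struct dma_iommu_mapping *mapping;
    int ret;

    mapping = arm_iommu_create_mapping(dev->bus, 0, SZ_64M);
    if (IS_ERR(mapping))
        return PTR_ERR(mapping);

    ret = arm_iommu_attach_device(dev, mapping);
    if (ret) {
        arm_iommu_release_mapping(mapping);
        return ret;
    }
    *out = mapping;
    return 0;
}

static void example_detach_iommu(struct device *dev,
                                 struct dma_iommu_mapping *mapping)
{
    arm_iommu_detach_device(dev);
    arm_iommu_release_mapping(mapping);
}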
2207 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2215 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); in arm_setup_iommu_dma_ops()
2217 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", in arm_setup_iommu_dma_ops()
2218 size, dev_name(dev)); in arm_setup_iommu_dma_ops()
2245 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2257 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in arch_setup_dma_ops() argument
2262 dev->archdata.dma_coherent = coherent; in arch_setup_dma_ops()
2264 dev->dma_coherent = coherent; in arch_setup_dma_ops()
2272 if (dev->dma_ops) in arch_setup_dma_ops()
2275 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) in arch_setup_dma_ops()
2284 dev->dma_ops = &xen_swiotlb_dma_ops; in arch_setup_dma_ops()
2286 dev->archdata.dma_ops_setup = true; in arch_setup_dma_ops()
2291 if (!dev->archdata.dma_ops_setup) in arch_teardown_dma_ops()
2295 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ in arch_teardown_dma_ops()
2300 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, in arch_sync_dma_for_device() argument
2303 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_device()
2304 size, dir); in arch_sync_dma_for_device()
2307 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, in arch_sync_dma_for_cpu() argument
2310 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_cpu()
2311 size, dir); in arch_sync_dma_for_cpu()
2314 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, in arch_dma_alloc() argument
2317 return __dma_alloc(dev, size, dma_handle, gfp, in arch_dma_alloc()
2322 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, in arch_dma_free() argument
2325 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); in arch_dma_free()