Lines matching "non-continuous" (full-text search: +full:non +full:- +full:continuous) in the ALSA core DMA buffer allocator, sound/core/memalloc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
11 #include <linux/dma-mapping.h>
12 #include <linux/dma-map-ops.h>
24 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
25 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
37 if (WARN_ON_ONCE(!ops || !ops->alloc)) in __snd_dma_alloc_pages()
39 return ops->alloc(dmab, size); in __snd_dma_alloc_pages()
43 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
51 * Calls the memory-allocator function for the corresponding
62 return -ENXIO; in snd_dma_alloc_dir_pages()
64 return -ENXIO; in snd_dma_alloc_dir_pages()
67 dmab->dev.type = type; in snd_dma_alloc_dir_pages()
68 dmab->dev.dev = device; in snd_dma_alloc_dir_pages()
69 dmab->dev.dir = dir; in snd_dma_alloc_dir_pages()
70 dmab->bytes = 0; in snd_dma_alloc_dir_pages()
71 dmab->addr = 0; in snd_dma_alloc_dir_pages()
72 dmab->private_data = NULL; in snd_dma_alloc_dir_pages()
73 dmab->area = __snd_dma_alloc_pages(dmab, size); in snd_dma_alloc_dir_pages()
74 if (!dmab->area) in snd_dma_alloc_dir_pages()
75 return -ENOMEM; in snd_dma_alloc_dir_pages()
76 dmab->bytes = size; in snd_dma_alloc_dir_pages()
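
The matched lines above are the core entry point, snd_dma_alloc_dir_pages(): it validates the arguments, fills in the snd_dma_buffer fields and dispatches to the per-type allocator. A minimal driver-side sketch of the usual call sequence follows; the device pointer, the 64 KiB size and the mydev_* names are illustrative, not taken from the listing.

#include <linux/device.h>
#include <sound/memalloc.h>

/* Allocate a DMA buffer; SNDRV_DMA_TYPE_DEV ends up in dma_alloc_coherent()
 * (see the snd_dma_dev_alloc() lines further below). */
static int mydev_alloc_buffer(struct device *dev, struct snd_dma_buffer *dmab)
{
        int err;

        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, dmab);
        if (err < 0)
                return err;     /* -ENXIO for bad arguments, else -ENOMEM */

        /* dmab->area is the CPU address, dmab->addr the DMA address */
        return 0;
}

static void mydev_free_buffer(struct snd_dma_buffer *dmab)
{
        snd_dma_free_pages(dmab);
}
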
82 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
88 * Calls the memory-allocator function for the corresponding
102 if (err != -ENOMEM) in snd_dma_alloc_pages_fallback()
105 return -ENOMEM; in snd_dma_alloc_pages_fallback()
109 if (! dmab->area) in snd_dma_alloc_pages_fallback()
110 return -ENOMEM; in snd_dma_alloc_pages_fallback()
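
snd_dma_alloc_pages_fallback() retries with progressively halved sizes whenever the allocation fails with -ENOMEM, so the caller must read dmab->bytes to learn how much was actually obtained. A hedged usage sketch (names and sizes are illustrative):

#include <linux/device.h>
#include <sound/memalloc.h>

static int mydev_alloc_big_buffer(struct device *dev,
                                  struct snd_dma_buffer *dmab)
{
        int err;

        /* ask for 1 MiB, but accept a smaller contiguous buffer */
        err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
                                           1024 * 1024, dmab);
        if (err < 0)
                return err;

        dev_dbg(dev, "allocated %zu bytes\n", dmab->bytes);
        return 0;
}
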
116 * snd_dma_free_pages - release the allocated buffer
125 if (ops && ops->free) in snd_dma_free_pages()
126 ops->free(dmab); in snd_dma_free_pages()
137 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
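
snd_devm_alloc_dir_pages() wraps the same allocation in a devres, so the buffer is released automatically when the driver is detached. A hedged sketch using the snd_devm_alloc_pages() convenience wrapper from <sound/memalloc.h>; note that the devres variants return NULL on failure rather than an error code:

#include <linux/device.h>
#include <linux/errno.h>
#include <sound/memalloc.h>

static int mydev_probe_buffer(struct device *dev)
{
        struct snd_dma_buffer *dmab;

        dmab = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 32 * 1024);
        if (!dmab)
                return -ENOMEM;

        /* no explicit snd_dma_free_pages(): devres releases the buffer */
        return 0;
}
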
179 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
191 return -ENOENT; in snd_dma_buffer_mmap()
193 if (ops && ops->mmap) in snd_dma_buffer_mmap()
194 return ops->mmap(dmab, area); in snd_dma_buffer_mmap()
196 return -ENOENT; in snd_dma_buffer_mmap()
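
snd_dma_buffer_mmap() simply forwards to the per-type ->mmap handlers that make up most of this listing. In a PCM driver it is normally reached through the default mmap path, but it can also be called from a driver's own .mmap callback, roughly as sketched below (hedged; snd_pcm_get_dma_buf() is the usual accessor for the substream's buffer and is assumed here, not shown in the listing):

#include <linux/mm.h>
#include <sound/pcm.h>

static int mydev_pcm_mmap(struct snd_pcm_substream *substream,
                          struct vm_area_struct *area)
{
        /* dispatch to the type-specific mmap (coherent, WC, noncontig, ...) */
        return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
}
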
202 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
211 if (!dmab || !dmab->dev.need_sync) in snd_dma_buffer_sync()
214 if (ops && ops->sync) in snd_dma_buffer_sync()
215 ops->sync(dmab, mode); in snd_dma_buffer_sync()
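
snd_dma_buffer_sync() only does real work when dmab->dev.need_sync was set at allocation time; the non-contiguous and non-coherent backends further down set it from dma_need_sync(). A hedged sketch of bracketing CPU access with the two sync modes:

#include <linux/string.h>
#include <sound/memalloc.h>

static void mydev_read_samples(struct snd_dma_buffer *dmab,
                               void *dst, size_t bytes)
{
        /* make device writes visible to the CPU; a no-op if !need_sync */
        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
        memcpy(dst, dmab->area, bytes);
        /* hand ownership back to the device before it DMAs again */
        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
}
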
221 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
231 if (ops && ops->get_addr) in snd_sgbuf_get_addr()
232 return ops->get_addr(dmab, offset); in snd_sgbuf_get_addr()
234 return dmab->addr + offset; in snd_sgbuf_get_addr()
239 * snd_sgbuf_get_page - return the physical page at the corresponding offset
249 if (ops && ops->get_page) in snd_sgbuf_get_page()
250 return ops->get_page(dmab, offset); in snd_sgbuf_get_page()
252 return virt_to_page(dmab->area + offset); in snd_sgbuf_get_page()
257 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
258 * on sg-buffer
270 if (ops && ops->get_chunk_size) in snd_sgbuf_get_chunk_size()
271 return ops->get_chunk_size(dmab, ofs, size); in snd_sgbuf_get_chunk_size()
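
snd_sgbuf_get_addr() and snd_sgbuf_get_chunk_size() let a driver program scatter-gather hardware without knowing which backend owns the buffer: the chunk-size helper returns the length of the physically continuous run starting at the given offset. A hedged sketch of the typical descriptor-building loop; program_descriptor() is a made-up stand-in for the real hardware programming:

#include <sound/memalloc.h>

static void mydev_build_descriptors(struct snd_dma_buffer *dmab,
                                    unsigned int bytes,
                                    void (*program_descriptor)(dma_addr_t addr,
                                                               unsigned int len))
{
        unsigned int ofs = 0;

        while (ofs < bytes) {
                /* largest physically continuous run starting at ofs */
                unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
                                                              bytes - ofs);

                program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
                ofs += chunk;
        }
}
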
278 * Continuous pages allocator
293 if ((*addr + size - 1) & ~dev->coherent_dma_mask) { in do_alloc_pages()
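
The matched condition above is the coherent-DMA-mask check in the page-allocator helper shared by the continuous and write-combined backends: if the pages landed above dev->coherent_dma_mask, the allocation is retried from a lower zone. A simplified, hedged reconstruction of that retry logic, not the verbatim kernel code (the real helper also copes with a NULL device, ZONE_DMA and the write-combine case):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_below_coherent_mask(struct device *dev, size_t size,
                                       dma_addr_t *addr)
{
        gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
        void *p;

again:
        p = alloc_pages_exact(size, gfp);
        if (!p)
                return NULL;
        *addr = page_to_phys(virt_to_page(p));
        if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
                /* out of the device's reach: retry from ZONE_DMA32 */
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
                        free_pages_exact(p, size);
                        gfp |= GFP_DMA32;
                        goto again;
                }
        }
        return p;
}
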
322 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false); in snd_dma_continuous_alloc()
327 do_free_pages(dmab->area, dmab->bytes, false); in snd_dma_continuous_free()
333 return remap_pfn_range(area, area->vm_start, in snd_dma_continuous_mmap()
334 dmab->addr >> PAGE_SHIFT, in snd_dma_continuous_mmap()
335 area->vm_end - area->vm_start, in snd_dma_continuous_mmap()
336 area->vm_page_prot); in snd_dma_continuous_mmap()
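
SNDRV_DMA_TYPE_CONTINUOUS therefore hands out plain kernel pages, records the physical address in dmab->addr and maps them to user space with remap_pfn_range(). Drivers usually select this type through the PCM managed-buffer helpers rather than calling the allocator directly; a hedged sketch (passing a NULL device is the customary usage for this type, since no DMA mapping is created):

#include <sound/pcm.h>

static void mydev_preallocate(struct snd_pcm *pcm)
{
        /* plain kernel pages, 64 KiB preallocated and 64 KiB maximum */
        snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
                                       NULL, 64 * 1024, 64 * 1024);
}
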
355 vfree(dmab->area); in snd_dma_vmalloc_free()
361 return remap_vmalloc_range(area, dmab->area, 0); in snd_dma_vmalloc_mmap()
365 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
376 return vmalloc_to_page(dmab->area + offset); in snd_dma_vmalloc_get_page()
387 end = ofs + size - 1; /* the last byte address */ in snd_dma_vmalloc_get_chunk_size()
396 return start - ofs; in snd_dma_vmalloc_get_chunk_size()
398 /* ok, all on continuous pages */ in snd_dma_vmalloc_get_chunk_size()
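
The vmalloc backend produces a buffer that is only virtually continuous: there is no single DMA address, and page lookups go through vmalloc_to_page() as the lines above show. This suits drivers that never DMA straight from the buffer, USB-audio style, where data is copied into URBs. A hedged sketch (a NULL device is assumed to be acceptable here, since the vmalloc allocator does not reference it):

#include <linux/printk.h>
#include <sound/memalloc.h>

static int mydev_alloc_vmalloc_buffer(struct snd_dma_buffer *dmab, size_t size)
{
        int err;

        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC, NULL, size, dmab);
        if (err < 0)
                return err;

        /* pages are resolved one at a time, e.g. when filling URBs */
        pr_debug("first page of buffer: %p\n", snd_sgbuf_get_page(dmab, 0));
        return 0;
}
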
418 struct device *dev = dmab->dev.dev; in snd_dma_iram_alloc()
422 if (dev->of_node) { in snd_dma_iram_alloc()
423 pool = of_gen_pool_get(dev->of_node, "iram", 0); in snd_dma_iram_alloc()
425 dmab->private_data = pool; in snd_dma_iram_alloc()
427 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); in snd_dma_iram_alloc()
435 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_iram_alloc()
441 struct gen_pool *pool = dmab->private_data; in snd_dma_iram_free()
443 if (pool && dmab->area) in snd_dma_iram_free()
444 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); in snd_dma_iram_free()
450 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_iram_mmap()
451 return remap_pfn_range(area, area->vm_start, in snd_dma_iram_mmap()
452 dmab->addr >> PAGE_SHIFT, in snd_dma_iram_mmap()
453 area->vm_end - area->vm_start, in snd_dma_iram_mmap()
454 area->vm_page_prot); in snd_dma_iram_mmap()
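
The IRAM backend draws from a gen_pool named "iram" in the device tree and, as the type reassignment above shows, quietly falls back to SNDRV_DMA_TYPE_DEV when no such pool exists. A hedged sketch of requesting on-chip memory for a small buffer:

#include <linux/device.h>
#include <sound/memalloc.h>

static int mydev_alloc_iram(struct device *dev, struct snd_dma_buffer *dmab)
{
        /* falls back to ordinary device memory if no "iram" pool is found */
        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev, 4096, dmab);
}
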
469 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_dev_alloc()
474 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_dev_free()
480 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_dev_mmap()
481 dmab->area, dmab->addr, dmab->bytes); in snd_dma_dev_mmap()
491 * Write-combined pages
493 /* x86-specific allocations */
497 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true); in snd_dma_wc_alloc()
502 do_free_pages(dmab->area, dmab->bytes, true); in snd_dma_wc_free()
508 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_wc_mmap()
514 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_wc_alloc()
519 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_wc_free()
525 return dma_mmap_wc(dmab->dev.dev, area, in snd_dma_wc_mmap()
526 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
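
The write-combined helpers appear twice in this listing because the file carries two implementations selected at build time: an x86-specific one built on the page-allocator helper with the write-combine attribute, and a generic one built on the dma_alloc_wc() family. Either way, a driver requests the type the same way; a hedged sketch (write-combining suits buffers the CPU mostly streams into and rarely reads back):

#include <linux/device.h>
#include <sound/memalloc.h>

static int mydev_alloc_wc(struct device *dev, struct snd_dma_buffer *dmab,
                          size_t size)
{
        /* user-space mmap gets pgprot_writecombine(), as in the lines above */
        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_WC, dev, size, dmab);
}
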
537 * Non-contiguous pages allocator
548 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, in snd_dma_noncontig_alloc()
551 if (!sgt && !get_dma_ops(dmab->dev.dev)) in snd_dma_noncontig_alloc()
557 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, in snd_dma_noncontig_alloc()
558 sg_dma_address(sgt->sgl)); in snd_dma_noncontig_alloc()
559 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); in snd_dma_noncontig_alloc()
561 dmab->private_data = sgt; in snd_dma_noncontig_alloc()
563 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_noncontig_alloc()
565 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); in snd_dma_noncontig_alloc()
572 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); in snd_dma_noncontig_free()
573 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, in snd_dma_noncontig_free()
574 dmab->dev.dir); in snd_dma_noncontig_free()
580 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_noncontig_mmap()
581 dmab->bytes, dmab->private_data); in snd_dma_noncontig_mmap()
588 if (dmab->dev.dir == DMA_TO_DEVICE) in snd_dma_noncontig_sync()
590 invalidate_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
591 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
592 dmab->dev.dir); in snd_dma_noncontig_sync()
594 if (dmab->dev.dir == DMA_FROM_DEVICE) in snd_dma_noncontig_sync()
596 flush_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
597 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
598 dmab->dev.dir); in snd_dma_noncontig_sync()
606 struct sg_table *sgt = dmab->private_data; in snd_dma_noncontig_iter_set()
608 __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, in snd_dma_noncontig_iter_set()
641 end = ofs + size - 1; /* the last byte address */ in snd_dma_noncontig_get_chunk_size()
654 return start - ofs; in snd_dma_noncontig_get_chunk_size()
656 /* ok, all on continuous pages */ in snd_dma_noncontig_get_chunk_size()
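
The non-contiguous backend keeps its struct sg_table in dmab->private_data (see the alloc lines above) and vmaps the whole table to provide dmab->area. Drivers are usually better served by the snd_sgbuf_* helpers, but hardware with its own scatter-gather engine can walk the mapped table directly; a hedged sketch relying on that private_data layout:

#include <linux/scatterlist.h>
#include <sound/memalloc.h>

static void mydev_program_sg(struct snd_dma_buffer *dmab,
                             void (*program)(dma_addr_t addr, unsigned int len))
{
        struct sg_table *sgt = dmab->private_data;
        struct scatterlist *sg;
        int i;

        /* one call per DMA-mapped segment */
        for_each_sgtable_dma_sg(sgt, sg, i)
                program(sg_dma_address(sg), sg_dma_len(sg));
}
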
670 /* x86-specific SG-buffer with WC pages */
677 struct sg_table *sgt = dmab->private_data; in snd_dma_sg_wc_alloc()
682 if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_sg_wc_alloc()
691 struct sg_table *sgt = dmab->private_data; in snd_dma_sg_wc_free()
702 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_wc_mmap()
703 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_sg_wc_mmap()
704 dmab->bytes, dmab->private_data); in snd_dma_sg_wc_mmap()
717 /* Fallback SG-buffer allocations for x86 */
731 if (sgbuf->pages && sgbuf->addrs) { in __snd_dma_sg_fallback_free()
733 while (i < sgbuf->count) { in __snd_dma_sg_fallback_free()
734 if (!sgbuf->pages[i] || !sgbuf->addrs[i]) in __snd_dma_sg_fallback_free()
736 size = sgbuf->addrs[i] & ~PAGE_MASK; in __snd_dma_sg_fallback_free()
739 if (sgbuf->use_dma_alloc_coherent) in __snd_dma_sg_fallback_free()
740 dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT, in __snd_dma_sg_fallback_free()
741 page_address(sgbuf->pages[i]), in __snd_dma_sg_fallback_free()
742 sgbuf->addrs[i] & PAGE_MASK); in __snd_dma_sg_fallback_free()
744 do_free_pages(page_address(sgbuf->pages[i]), in __snd_dma_sg_fallback_free()
749 kvfree(sgbuf->pages); in __snd_dma_sg_fallback_free()
750 kvfree(sgbuf->addrs); in __snd_dma_sg_fallback_free()
764 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) in snd_dma_sg_fallback_alloc()
765 dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK; in snd_dma_sg_fallback_alloc()
766 else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_sg_fallback_alloc()
767 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK; in snd_dma_sg_fallback_alloc()
772 sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV); in snd_dma_sg_fallback_alloc()
774 sgbuf->count = size >> PAGE_SHIFT; in snd_dma_sg_fallback_alloc()
775 sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
776 sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
777 if (!sgbuf->pages || !sgbuf->addrs) in snd_dma_sg_fallback_alloc()
780 pagep = sgbuf->pages; in snd_dma_sg_fallback_alloc()
781 addrp = sgbuf->addrs; in snd_dma_sg_fallback_alloc()
782 chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */ in snd_dma_sg_fallback_alloc()
785 if (sgbuf->use_dma_alloc_coherent) in snd_dma_sg_fallback_alloc()
786 p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP); in snd_dma_sg_fallback_alloc()
788 p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false); in snd_dma_sg_fallback_alloc()
797 size -= chunk; in snd_dma_sg_fallback_alloc()
802 while (npages--) { in snd_dma_sg_fallback_alloc()
809 p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL); in snd_dma_sg_fallback_alloc()
813 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) in snd_dma_sg_fallback_alloc()
814 set_pages_array_wc(sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_alloc()
816 dmab->private_data = sgbuf; in snd_dma_sg_fallback_alloc()
818 dmab->addr = sgbuf->addrs[0] & PAGE_MASK; in snd_dma_sg_fallback_alloc()
828 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_free()
830 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) in snd_dma_sg_fallback_free()
831 set_pages_array_wb(sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_free()
832 vunmap(dmab->area); in snd_dma_sg_fallback_free()
833 __snd_dma_sg_fallback_free(dmab, dmab->private_data); in snd_dma_sg_fallback_free()
839 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_get_addr()
842 return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK); in snd_dma_sg_fallback_get_addr()
848 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_mmap()
850 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) in snd_dma_sg_fallback_mmap()
851 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_fallback_mmap()
852 return vm_map_pages(area, sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_mmap()
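
The fallback buffer packs two pieces of information into its addrs[] array: every entry keeps the page-aligned DMA address of its page in the high bits, and the first entry of each chunk additionally carries the chunk's page count in the low bits. That is why chunks are capped at PAGE_SIZE - 1 pages ("to fit in low bits in addrs") and why the free and get_addr paths mask with PAGE_MASK. A hedged pair of helpers spelling out the encoding (names are illustrative):

#include <linux/mm.h>
#include <linux/types.h>

/* addrs[n] = (DMA address of page n & PAGE_MASK) | chunk page count,
 * where the count is only stored in the first entry of a chunk */
static inline dma_addr_t fallback_entry_addr(dma_addr_t entry)
{
        return entry & PAGE_MASK;       /* DMA address of this page */
}

static inline unsigned int fallback_entry_pages(dma_addr_t entry)
{
        return entry & ~PAGE_MASK;      /* pages in the chunk (leading entry) */
}
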
867 * Non-coherent pages allocator
873 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, in snd_dma_noncoherent_alloc()
874 dmab->dev.dir, DEFAULT_GFP); in snd_dma_noncoherent_alloc()
876 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); in snd_dma_noncoherent_alloc()
882 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, in snd_dma_noncoherent_free()
883 dmab->addr, dmab->dev.dir); in snd_dma_noncoherent_free()
889 area->vm_page_prot = vm_get_page_prot(area->vm_flags); in snd_dma_noncoherent_mmap()
890 return dma_mmap_pages(dmab->dev.dev, area, in snd_dma_noncoherent_mmap()
891 area->vm_end - area->vm_start, in snd_dma_noncoherent_mmap()
892 virt_to_page(dmab->area)); in snd_dma_noncoherent_mmap()
899 if (dmab->dev.dir != DMA_TO_DEVICE) in snd_dma_noncoherent_sync()
900 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
901 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
903 if (dmab->dev.dir != DMA_FROM_DEVICE) in snd_dma_noncoherent_sync()
904 dma_sync_single_for_device(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
905 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
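
The non-coherent backend records at allocation time whether explicit cache maintenance is needed (dma_need_sync()), which is what makes snd_dma_buffer_sync() earlier in this listing meaningful. A hedged sketch of allocating a capture buffer with an explicit direction:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <sound/memalloc.h>

static int mydev_alloc_capture_buffer(struct device *dev,
                                      struct snd_dma_buffer *dmab, size_t size)
{
        int err;

        err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCOHERENT, dev,
                                      DMA_FROM_DEVICE, size, dmab);
        if (err < 0)
                return err;

        /* when set, snd_dma_buffer_sync() is not a no-op for this buffer */
        dev_dbg(dev, "explicit sync needed: %d\n", dmab->dev.need_sync);
        return 0;
}
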
946 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || in snd_dma_get_ops()
947 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops))) in snd_dma_get_ops()
949 return snd_dma_ops[dmab->dev.type]; in snd_dma_get_ops()
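
The bounds-checked lookup above is the whole dispatch mechanism: each SNDRV_DMA_TYPE_* constant indexes a table of per-type ops (alloc, free, mmap, sync, get_addr, get_page, get_chunk_size). A hedged, partial sketch of what such a table looks like; the ops structure and the entry names are illustrative, since the listing shows only the lookup, not the table itself:

/* one ops pointer per buffer type, selected by dmab->dev.type */
static const struct snd_malloc_ops *snd_dma_ops[] = {
        [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
        [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
        [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
        /* further entries per type, some only under #ifdef */
};
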