// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-map-ops.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !(area->flags & VM_DMA_COHERENT))
		return NULL;
	WARN(area->flags != VM_DMA_COHERENT,
	     "unexpected flags in area: %p\n", cpu_addr);
	return area->pages;
}
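
/*
 * Illustrative sketch, not part of this file: a caller holding an
 * address returned by dma_common_pages_remap() below can recover the
 * backing page array like this; NULL comes back for any address whose
 * vm_struct does not carry VM_DMA_COHERENT.  The helper name is
 * hypothetical.
 */
static struct page *example_first_backing_page(void *cpu_addr)
{
	struct page **pages = dma_common_find_pages(cpu_addr);

	return pages ? pages[0] : NULL;
}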

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			 pgprot_t prot, const void *caller)
{
	void *vaddr;

	vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
		     VM_DMA_COHERENT, prot);
	if (vaddr)
		find_vm_area(vaddr)->pages = pages;
	return vaddr;
}
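
/*
 * Illustrative sketch, not part of this file: mapping a discontiguous
 * page array and tearing the mapping down again.  Because the helper
 * above stores @pages in the vm_struct, the array must stay allocated
 * until after dma_common_free_remap().  Names and error handling are
 * hypothetical and minimal.
 */
static void *example_map_page_array(struct page **pages, size_t size)
{
	return dma_common_pages_remap(pages, size, PAGE_KERNEL,
				      __builtin_return_address(0));
}

static void example_unmap_page_array(void *vaddr, size_t size)
{
	dma_common_free_remap(vaddr, size);
	/* only now may the caller free the pages[] array itself */
}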

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = page++;
	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
	kvfree(pages);

	return vaddr;
}
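
/*
 * Illustrative sketch, not part of this file: giving a physically
 * contiguous allocation an uncacheable (write-combining) kernel alias,
 * which is the typical use of the helper above on architectures where
 * DMA buffers must not be mapped cacheable.  The helper name is
 * hypothetical.
 */
static void *example_remap_writecombine(struct page *page, size_t size)
{
	return dma_common_contiguous_remap(page, size,
			pgprot_writecombine(PAGE_KERNEL),
			__builtin_return_address(0));
}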

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !(area->flags & VM_DMA_COHERENT)) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	vunmap(cpu_addr);
}
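
/*
 * Illustrative sketch, not part of this file: tearing down a buffer set
 * up with example_remap_writecombine() above.  The kernel alias is
 * dropped first, then the pages go back to their allocator.  That the
 * pages came from the contiguous (CMA) allocator is an assumption of
 * this sketch; dma_free_contiguous() itself comes from
 * <linux/dma-map-ops.h>.
 */
static void example_release_writecombine(struct device *dev, void *vaddr,
					 struct page *page, size_t size)
{
	dma_common_free_remap(vaddr, size);
	dma_free_contiguous(dev, page, size);
}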