Lines matching the full-text query "dma-mem" (search-engine tokenization: +full:dma +full:-mem)

23 #include "mem.h"
35 struct page **mem; member
38 dma_addr_t *dma; member
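The two matched members above sit inside struct nvkm_mem, the host-memory object of the Nouveau NVKM MMU backend (drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c, which the mem.h include at the top belongs to). A sketch of the containing structure as it reads upstream; treat the exact field order and the anonymous union as version-dependent:

struct nvkm_mem {
        struct nvkm_memory memory;      /* base object; nvkm_mem() downcasts from it */
        enum nvkm_memory_target target; /* HOST (cached/coherent) or NCOH */
        struct nvkm_mmu *mmu;
        u64 pages;                      /* count of PAGE_SIZE pages backing the object */
        struct page **mem;              /* page array; NULL when storage came from the caller */
        union {
                struct scatterlist *sgl;        /* caller-supplied scatterlist */
                dma_addr_t *dma;                /* per-page bus addresses */
        };
};

The members mem and dma are exactly the identifiers the search query matched.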
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
57 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_addr() local
58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
59 return mem->dma[0]; in nvkm_mem_addr()
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
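Reconstructed from the matches above, the three trivial accessors look roughly like this (a sketch; the ~0ULL fallback in nvkm_mem_addr is how upstream signals "no single linear address"):

static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
        return nvkm_mem(memory)->target;
}

static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        /* Only a single DMA-mapped page has one well-defined address. */
        if (mem->pages == 1 && mem->mem)
                return mem->dma[0];
        return ~0ULL;
}

static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
        return nvkm_mem(memory)->pages << PAGE_SHIFT;
}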
73 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_map_dma() local
75 .memory = &mem->memory, in nvkm_mem_map_dma()
77 .dma = mem->dma, in nvkm_mem_map_dma()
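nvkm_mem_map_dma() packages the per-page bus addresses into an nvkm_vmm_map descriptor and defers to the VMM. A sketch, with the map-hook signature as defined by struct nvkm_memory_func upstream:

static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                 struct nvkm_vma *vma, void *argv, u32 argc)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        struct nvkm_vmm_map map = {
                .memory = &mem->memory,
                .offset = offset,
                .dma = mem->dma,        /* page-granular DMA addresses */
        };
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}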
85 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_dtor() local
86 if (mem->mem) { in nvkm_mem_dtor()
87 while (mem->pages--) { in nvkm_mem_dtor()
88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
89 mem->dma[mem->pages], PAGE_SIZE, in nvkm_mem_dtor()
91 __free_page(mem->mem[mem->pages]); in nvkm_mem_dtor()
93 kvfree(mem->dma); in nvkm_mem_dtor()
94 kvfree(mem->mem); in nvkm_mem_dtor()
96 return mem; in nvkm_mem_dtor()
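Filled out, the destructor is the mirror image of the allocation loop further down: it walks the page array backwards, unmapping each page before freeing it, then releases the two kvmalloc'd bookkeeping arrays. The DMA_BIDIRECTIONAL direction is not visible in the matched lines and is taken from the upstream source:

static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        if (mem->mem) {
                /* Unwind in reverse: unmap, then free, each page. */
                while (mem->pages--) {
                        dma_unmap_page(mem->mmu->subdev.device->dev,
                                       mem->dma[mem->pages], PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                        __free_page(mem->mem[mem->pages]);
                }
                kvfree(mem->dma);
                kvfree(mem->mem);
        }
        return mem;     /* caller kfree()s the returned object */
}

Because mem->pages always reflects how many pages were successfully mapped, the same destructor also cleans up after a partially failed nvkm_mem_new_host().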
113 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_map_sgl() local
115 .memory = &mem->memory, in nvkm_mem_map_sgl()
117 .sgl = mem->sgl, in nvkm_mem_map_sgl()
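The scatterlist flavour differs from nvkm_mem_map_dma() only in which union member it forwards; a sketch under the same signature assumption:

static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                 struct nvkm_vma *vma, void *argv, u32 argc)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        struct nvkm_vmm_map map = {
                .memory = &mem->memory,
                .offset = offset,
                .sgl = mem->sgl,        /* caller-supplied scatterlist */
        };
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}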
135 struct nvkm_mem *mem = nvkm_mem(memory); in nvkm_mem_map_host() local
136 if (mem->mem) { in nvkm_mem_map_host()
137 *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL); in nvkm_mem_map_host()
138 return *pmap ? 0 : -EFAULT; in nvkm_mem_map_host()
140 return -EINVAL; in nvkm_mem_map_host()
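nvkm_mem_map_host() gives the CPU a contiguous view of the discontiguous page array via vmap(); it can only do so for driver-allocated memory, since caller-supplied dma/sgl storage carries no struct page array. Reconstructed:

int
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
{
        struct nvkm_mem *mem = nvkm_mem(memory);
        if (mem->mem) {
                *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
                return *pmap ? 0 : -EFAULT;
        }
        return -EINVAL; /* no page array to map (dma/sgl-backed object) */
}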
147 struct device *dev = mmu->subdev.device->dev; in nvkm_mem_new_host()
152 int ret = -ENOSYS; in nvkm_mem_new_host()
154 struct nvkm_mem *mem; in nvkm_mem_new_host() local
157 if ( (mmu->type[type].type & NVKM_MEM_COHERENT) && in nvkm_mem_new_host()
158 !(mmu->type[type].type & NVKM_MEM_UNCACHED)) in nvkm_mem_new_host()
164 return -EINVAL; in nvkm_mem_new_host()
166 if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL))) in nvkm_mem_new_host()
167 return -ENOMEM; in nvkm_mem_new_host()
168 mem->target = target; in nvkm_mem_new_host()
169 mem->mmu = mmu; in nvkm_mem_new_host()
170 *pmemory = &mem->memory; in nvkm_mem_new_host()
172 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in nvkm_mem_new_host()
173 if (args->v0.dma) { in nvkm_mem_new_host()
174 nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory); in nvkm_mem_new_host()
175 mem->dma = args->v0.dma; in nvkm_mem_new_host()
177 nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory); in nvkm_mem_new_host()
178 mem->sgl = args->v0.sgl; in nvkm_mem_new_host()
182 return -EINVAL; in nvkm_mem_new_host()
183 mem->pages = size >> PAGE_SHIFT; in nvkm_mem_new_host()
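The first half of nvkm_mem_new_host() handles the case where the nvif argument struct unpacks as v0, i.e. the storage already exists on the caller's side and is merely adopted; only the page-alignment of the size is verified. A sketch of that branch (the IS_ALIGNED check is recalled from upstream rather than visible in the matches):

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if (args->v0.dma) {
                        /* Caller hands us per-page bus addresses... */
                        nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
                        mem->dma = args->v0.dma;
                } else {
                        /* ...or a scatterlist. */
                        nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
                        mem->sgl = args->v0.sgl;
                }

                if (!IS_ALIGNED(size, PAGE_SIZE))
                        return -EINVAL;
                mem->pages = size >> PAGE_SHIFT;
                return 0;
        }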
186 if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) { in nvkm_mem_new_host()
187 kfree(mem); in nvkm_mem_new_host()
191 nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory); in nvkm_mem_new_host()
194 if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL))) in nvkm_mem_new_host()
195 return -ENOMEM; in nvkm_mem_new_host()
196 if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL))) in nvkm_mem_new_host()
197 return -ENOMEM; in nvkm_mem_new_host()
199 if (mmu->dma_bits > 32) in nvkm_mem_new_host()
204 for (mem->pages = 0; size; size--, mem->pages++) { in nvkm_mem_new_host()
207 return -ENOMEM; in nvkm_mem_new_host()
209 mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev, in nvkm_mem_new_host()
212 if (dma_mapping_error(dev, mem->dma[mem->pages])) { in nvkm_mem_new_host()
214 return -ENOMEM; in nvkm_mem_new_host()
217 mem->mem[mem->pages] = p; in nvkm_mem_new_host()
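Otherwise the function allocates everything itself: two kvmalloc'd bookkeeping arrays, then one alloc_page()/dma_map_page() pair per page. A sketch of that tail, assuming (as upstream) that gfp starts as GFP_USER | __GFP_ZERO earlier in the function:

        nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
        size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

        if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
                return -ENOMEM;
        if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
                return -ENOMEM;

        /* Prefer highmem when the device can address it, else force DMA32. */
        if (mmu->dma_bits > 32)
                gfp |= GFP_HIGHUSER;
        else
                gfp |= GFP_DMA32;

        for (mem->pages = 0; size; size--, mem->pages++) {
                struct page *p = alloc_page(gfp);
                if (!p)
                        return -ENOMEM;

                mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
                                                    p, 0, PAGE_SIZE,
                                                    DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, mem->dma[mem->pages])) {
                        __free_page(p);
                        return -ENOMEM;
                }

                mem->mem[mem->pages] = p;
        }

        return 0;

The bare -ENOMEM returns mid-loop are not leaks: *pmemory was published before unpacking, so the caller's nvkm_memory_unref() runs nvkm_mem_dtor(), which unwinds exactly the mem->pages pages mapped so far.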
230 if (mmu->type[type].type & NVKM_MEM_VRAM) { in nvkm_mem_new_type()
231 ret = mmu->func->mem.vram(mmu, type, page, size, in nvkm_mem_new_type()
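At the top level, nvkm_mem_new_type() merely dispatches: VRAM-typed requests go to the chip-specific mmu->func->mem.vram hook matched above, everything else to nvkm_mem_new_host(). Roughly, per the upstream flow:

int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
                  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
        struct nvkm_memory *memory = NULL;
        int ret;

        if (mmu->type[type].type & NVKM_MEM_VRAM) {
                ret = mmu->func->mem.vram(mmu, type, page, size,
                                          argv, argc, &memory);
        } else {
                ret = nvkm_mem_new_host(mmu, type, page, size,
                                        argv, argc, &memory);
        }

        /* On failure, unref tears down whatever was partially built. */
        if (ret)
                nvkm_memory_unref(&memory);
        *pmemory = memory;
        return ret;
}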