Lines matching full:buffer in drivers/dma-buf/heaps/cma_heap.c
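Every hit below dereferences struct cma_heap_buffer, the per-buffer bookkeeping kept in dmabuf->priv. For orientation, this is how the mainline kernel defines it and its heap handle; the field comments are added here, and this tree may differ slightly:

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;	/* dma_heap_attachment nodes, guarded by lock */
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;		/* first page of the contiguous CMA allocation */
	struct page **pages;		/* per-page view used for sg tables and vmap */
	pgoff_t pagecount;
	int vmap_cnt;			/* reference count on vaddr */
	void *vaddr;			/* kernel mapping created by cma_heap_do_vmap() */
};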

73 	struct cma_heap_buffer *buffer = dmabuf->priv;  in cma_heap_attach()  local
81 ret = sg_alloc_table_from_pages(&a->table, buffer->pages, in cma_heap_attach()
82 buffer->pagecount, 0, in cma_heap_attach()
83 buffer->pagecount << PAGE_SHIFT, in cma_heap_attach()
96 mutex_lock(&buffer->lock); in cma_heap_attach()
97 list_add(&a->list, &buffer->attachments); in cma_heap_attach()
98 mutex_unlock(&buffer->lock); in cma_heap_attach()
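Filling in the elided lines, the attach path in the mainline driver reads approximately as below; the dma_heap_attachment type and the error handling are reconstructed from upstream, not from these hits:

static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	/* Build a scatterlist spanning the whole buffer from its page array. */
	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	attachment->priv = a;

	/* buffer->lock serializes the attachments list against the
	 * begin/end_cpu_access walkers seen further down. */
	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}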
106 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_detach() local
109 mutex_lock(&buffer->lock); in cma_heap_detach()
111 mutex_unlock(&buffer->lock); in cma_heap_detach()
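The detach side is the mirror image; the elided line between the two hits is the list_del() that undoes the list_add() above (reconstructed from mainline):

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}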
144 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_begin_cpu_access() local
147 mutex_lock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
149 if (buffer->vmap_cnt) in cma_heap_dma_buf_begin_cpu_access()
150 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_begin_cpu_access()
152 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_begin_cpu_access()
157 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_begin_cpu_access()
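In mainline, the loop body elided between these hits syncs each DMA-mapped attachment back for CPU access; a sketch of the whole callback (the a->mapped test is reconstructed, not shown in the hits):

static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	/* Toss stale cache lines for the kernel vmap alias before CPU reads. */
	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}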
165 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_end_cpu_access() local
168 mutex_lock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
170 if (buffer->vmap_cnt) in cma_heap_dma_buf_end_cpu_access()
171 flush_kernel_vmap_range(buffer->vaddr, buffer->len); in cma_heap_dma_buf_end_cpu_access()
173 list_for_each_entry(a, &buffer->attachments, list) { in cma_heap_dma_buf_end_cpu_access()
178 mutex_unlock(&buffer->lock); in cma_heap_dma_buf_end_cpu_access()
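end_cpu_access is symmetric: flush instead of invalidate, and sync toward the device; reconstructed from mainline:

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	/* Write back CPU stores made through the kernel vmap alias... */
	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	/* ...then push each mapped attachment's range out to the device. */
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}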
186 struct cma_heap_buffer *buffer = vma->vm_private_data; in cma_heap_vm_fault() local
188 if (vmf->pgoff >= buffer->pagecount) in cma_heap_vm_fault()
191 return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff])); in cma_heap_vm_fault()
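The fault handler is nearly complete in the hits already; only the error return between them is elided (mainline returns VM_FAULT_SIGBUS):

static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	/* Faults past the end of the buffer get SIGBUS, not a mapping. */
	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address,
			      page_to_pfn(buffer->pages[vmf->pgoff]));
}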
200 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_mmap() local
208 vma->vm_private_data = buffer; in cma_heap_mmap()
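mmap itself only validates the VMA and wires up the fault handler above. The vm_ops name and the flag set below follow mainline; vm_flags_set() is the post-6.3 helper, and older trees OR the flags into vma->vm_flags directly:

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	/* PFN map: pages are inserted lazily by cma_heap_vm_fault(). */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}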
213 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) in cma_heap_do_vmap() argument
217 vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); in cma_heap_do_vmap()
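The helper's only other job, per mainline, is converting a vmap() failure into an ERR_PTR:

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	/* Map the page array into a single contiguous kernel VA range. */
	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}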
226 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vmap() local
230 mutex_lock(&buffer->lock); in cma_heap_vmap()
231 if (buffer->vmap_cnt) { in cma_heap_vmap()
232 buffer->vmap_cnt++; in cma_heap_vmap()
233 iosys_map_set_vaddr(map, buffer->vaddr); in cma_heap_vmap()
237 vaddr = cma_heap_do_vmap(buffer); in cma_heap_vmap()
242 buffer->vaddr = vaddr; in cma_heap_vmap()
243 buffer->vmap_cnt++; in cma_heap_vmap()
244 iosys_map_set_vaddr(map, buffer->vaddr); in cma_heap_vmap()
246 mutex_unlock(&buffer->lock); in cma_heap_vmap()
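Put together, cma_heap_vmap() is a refcounted wrapper around that helper: the first caller pays for the mapping, later callers just bump vmap_cnt and reuse buffer->vaddr. The goto-based error path is reconstructed from mainline:

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}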
253 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_vunmap() local
255 mutex_lock(&buffer->lock); in cma_heap_vunmap()
256 if (!--buffer->vmap_cnt) { in cma_heap_vunmap()
257 vunmap(buffer->vaddr); in cma_heap_vunmap()
258 buffer->vaddr = NULL; in cma_heap_vunmap()
260 mutex_unlock(&buffer->lock); in cma_heap_vunmap()
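vunmap drops a reference and tears the mapping down only when the count hits zero; mainline also clears the caller's iosys_map on the way out:

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	/* Last user gone: release the kernel VA range. */
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}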
266 struct cma_heap_buffer *buffer = dmabuf->priv; in cma_heap_dma_buf_release() local
267 struct cma_heap *cma_heap = buffer->heap; in cma_heap_dma_buf_release()
269 if (buffer->vmap_cnt > 0) { in cma_heap_dma_buf_release()
270 WARN(1, "%s: buffer still mapped in the kernel\n", __func__); in cma_heap_dma_buf_release()
271 vunmap(buffer->vaddr); in cma_heap_dma_buf_release()
272 buffer->vaddr = NULL; in cma_heap_dma_buf_release()
276 kfree(buffer->pages); in cma_heap_dma_buf_release()
278 cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); in cma_heap_dma_buf_release()
279 kfree(buffer); in cma_heap_dma_buf_release()
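Release is fully visible in the hits apart from comments; annotated:

static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	/* A nonzero vmap_cnt here means some kernel user leaked a vmap;
	 * warn loudly, then clean up anyway so the pages can go back. */
	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* Free the page-pointer array, return the pages to the CMA pool,
	 * then free the bookkeeping itself. */
	kfree(buffer->pages);
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}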
301 struct cma_heap_buffer *buffer; in cma_heap_allocate() local
311 buffer = kzalloc_obj(*buffer); in cma_heap_allocate()
312 if (!buffer) in cma_heap_allocate()
315 INIT_LIST_HEAD(&buffer->attachments); in cma_heap_allocate()
316 mutex_init(&buffer->lock); in cma_heap_allocate()
317 buffer->len = size; in cma_heap_allocate()
349 buffer->pages = kmalloc_objs(*buffer->pages, pagecount); in cma_heap_allocate()
350 if (!buffer->pages) { in cma_heap_allocate()
356 buffer->pages[pg] = &cma_pages[pg]; in cma_heap_allocate()
358 buffer->cma_pages = cma_pages; in cma_heap_allocate()
359 buffer->heap = cma_heap; in cma_heap_allocate()
360 buffer->pagecount = pagecount; in cma_heap_allocate()
365 exp_info.size = buffer->len; in cma_heap_allocate()
367 exp_info.priv = buffer; in cma_heap_allocate()
376 kfree(buffer->pages); in cma_heap_allocate()
380 kfree(buffer); in cma_heap_allocate()
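The allocation path ties everything together. Note that this tree's kzalloc_obj()/kmalloc_objs() helpers (hits 311 and 349) are not in mainline, which spells them kzalloc(sizeof(*buffer), GFP_KERNEL) and kmalloc_array(); the sketch below follows mainline, whose signature also varies across versions, and omits the page-clearing loop elided between hits 317 and 349:

static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 u32 fd_flags, u64 heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	/* One physically contiguous CMA allocation backs the whole buffer. */
	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* (Mainline zeroes and flushes the pages here; elided in the hits.) */

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
				      GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	/* Contiguous backing, so the per-page array is just &cma_pages[pg]. */
	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* Export as a dma-buf; the fd handed to userspace wraps this. */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);
	return ERR_PTR(ret);
}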