1 /*
2 * videobuf2-memops.c - generic memory handling routines for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mm.h>
19 #include <linux/sched.h>
20 #include <linux/file.h>
21
22 #include <media/videobuf2-core.h>
23 #include <media/videobuf2-memops.h>
24
25 /**
26 * vb2_get_vma() - acquire and lock the virtual memory area
27 * @vma: given virtual memory area
28 *
29 * This function attempts to acquire an area mapped in the userspace for
30 * the duration of a hardware operation. The area is "locked" by performing
31 * the same set of operation that are done when process calls fork() and
32 * memory areas are duplicated.
33 *
34 * Returns a copy of a virtual memory region on success or NULL.
35 */
vb2_get_vma(struct vm_area_struct * vma)36 struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
37 {
38 struct vm_area_struct *vma_copy;
39
40 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
41 if (vma_copy == NULL)
42 return NULL;
43
44 if (vma->vm_ops && vma->vm_ops->open)
45 vma->vm_ops->open(vma);
46
47 if (vma->vm_file)
48 get_file(vma->vm_file);
49
50 memcpy(vma_copy, vma, sizeof(*vma));
51
52 vma_copy->vm_mm = NULL;
53 vma_copy->vm_next = NULL;
54 vma_copy->vm_prev = NULL;
55
56 return vma_copy;
57 }
58
/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma: virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.
 */
vb2_put_vma(struct vm_area_struct * vma)66 void vb2_put_vma(struct vm_area_struct *vma)
67 {
68 if (!vma)
69 return;
70
71 if (vma->vm_ops && vma->vm_ops->close)
72 vma->vm_ops->close(vma);
73
74 if (vma->vm_file)
75 fput(vma->vm_file);
76
77 kfree(vma);
78 }
79 EXPORT_SYMBOL_GPL(vb2_put_vma);
80
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr: starting virtual address of the area to be verified
 * @size: size of the area
 * @res_vma: will return locked copy of struct vm_area for the given area
 * @res_pa: will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
				struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;

	start = vaddr;
	/* Sub-page offset of the buffer within its first page. */
	offset = start & ~PAGE_MASK;
	end = start + size;

	/* NOTE(review): find_vma()/follow_pfn() need mm->mmap_sem held;
	 * presumably the caller takes it -- confirm at the call sites. */
	vma = find_vma(mm, start);

	/* The whole [start, end) range must fit in a single mapping. */
	if (vma == NULL || vma->vm_end < end)
		return -EFAULT;

	/*
	 * Walk the area one page at a time and require each page frame to
	 * directly follow the previous one; prev_pfn == 0 marks the first
	 * iteration, where the base physical address is captured instead.
	 */
	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		int ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			return ret;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1)
			return -EFAULT;	/* hole in the physical range */

		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL)
		return -ENOMEM;

	*res_pa = pa + offset;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
137
138 /**
139 * vb2_mmap_pfn_range() - map physical pages to userspace
140 * @vma: virtual memory region for the mapping
141 * @paddr: starting physical address of the memory to be mapped
142 * @size: size of the memory to be mapped
143 * @vm_ops: vm operations to be assigned to the created area
144 * @priv: private data to be associated with the area
145 *
146 * Returns 0 on success.
147 */
vb2_mmap_pfn_range(struct vm_area_struct * vma,unsigned long paddr,unsigned long size,const struct vm_operations_struct * vm_ops,void * priv)148 int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
149 unsigned long size,
150 const struct vm_operations_struct *vm_ops,
151 void *priv)
152 {
153 int ret;
154
155 size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
156
157 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
158 ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
159 size, vma->vm_page_prot);
160 if (ret) {
161 printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
162 return ret;
163 }
164
165 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
166 vma->vm_private_data = priv;
167 vma->vm_ops = vm_ops;
168
169 vma->vm_ops->open(vma);
170
171 pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
172 __func__, paddr, vma->vm_start, size);
173
174 return 0;
175 }
176 EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
177
178 /**
179 * vb2_common_vm_open() - increase refcount of the vma
180 * @vma: virtual memory region for the mapping
181 *
182 * This function adds another user to the provided vma. It expects
183 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
184 */
vb2_common_vm_open(struct vm_area_struct * vma)185 static void vb2_common_vm_open(struct vm_area_struct *vma)
186 {
187 struct vb2_vmarea_handler *h = vma->vm_private_data;
188
189 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
190 __func__, h, atomic_read(h->refcount), vma->vm_start,
191 vma->vm_end);
192
193 atomic_inc(h->refcount);
194 }
195
196 /**
197 * vb2_common_vm_close() - decrease refcount of the vma
198 * @vma: virtual memory region for the mapping
199 *
200 * This function releases the user from the provided vma. It expects
201 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
202 */
vb2_common_vm_close(struct vm_area_struct * vma)203 static void vb2_common_vm_close(struct vm_area_struct *vma)
204 {
205 struct vb2_vmarea_handler *h = vma->vm_private_data;
206
207 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
208 __func__, h, atomic_read(h->refcount), vma->vm_start,
209 vma->vm_end);
210
211 h->put(h->arg);
212 }
213
214 /**
215 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
216 * video buffers
217 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,	/* increments the handler refcount */
	.close = vb2_common_vm_close,	/* invokes the handler's put() callback */
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
223
224 MODULE_DESCRIPTION("common memory handling routines for videobuf2");
225 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
226 MODULE_LICENSE("GPL");
227