// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately and to
 * not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */
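
/*
 * A minimal lifecycle sketch of the API described above ("drv", "obj" and
 * "file_priv" are illustrative driver-side names, not part of this API):
 *
 *     drm_vma_offset_manager_init(&drv->vma_manager, page_offset, size);
 *
 *     drm_vma_offset_add(&drv->vma_manager, &obj->vma_node,
 *                        obj->size >> PAGE_SHIFT);
 *     drm_vma_node_allow(&obj->vma_node, file_priv);
 *
 *     // user-space may now mmap() the object at the byte offset returned
 *     // by drm_vma_node_offset_addr(&obj->vma_node)
 *
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);
 *     drm_vma_offset_remove(&drv->vma_manager, &obj->vma_node);
 *     drm_vma_offset_manager_destroy(&drv->vma_manager);
 */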

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
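
/*
 * Example: the DRM core sets up one manager per device for GEM mmap offsets
 * (DRM_FILE_PAGE_OFFSET_START/SIZE are constants defined in drm_gem.c and
 * are shown here only for illustration):
 *
 *     drm_vma_offset_manager_init(dev->vma_offset_manager,
 *                                 DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);
 */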

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then
 * be used to implement weakly referenced lookups using
 * kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
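
/*
 * A fuller lookup sketch than the kernel-doc example above, assuming a
 * hypothetical object type with an embedded kref ("struct my_obj" is
 * illustrative, not part of this API):
 *
 *     struct my_obj {
 *         struct kref ref;
 *         struct drm_vma_offset_node vma_node;
 *     };
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *                                         vma_pages(vma));
 *     if (node) {
 *         obj = container_of(node, struct my_obj, vma_node);
 *         if (!kref_get_unless_zero(&obj->ref))
 *             obj = NULL;
 *     }
 *     drm_vma_offset_unlock_lookup(mgr);
 */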

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(); no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
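
/*
 * Example: a driver typically allocates the mmap offset when the object is
 * first exposed to user-space ("obj" is illustrative; this mirrors what
 * drm_gem_create_mmap_offset() does for GEM objects):
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *                              obj->size >> PAGE_SHIFT);
 *     if (ret)
 *         return ret;
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * The resulting offset is the byte-based value user-space passes to mmap().
 */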

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
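
/*
 * Example teardown sketch ("obj" is illustrative): drop the offset before
 * freeing the object. This is safe even if drm_vma_offset_add() was never
 * called or failed:
 *
 *     drm_vma_offset_remove(mgr, &obj->vma_node);
 *     kfree(obj);
 */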

static int vma_node_allow(struct drm_vma_offset_node *node,
			  struct drm_file *tag, bool ref_counted)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the
	 * entry is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			if (ref_counted)
				entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, true);
}
EXPORT_SYMBOL(drm_vma_node_allow);
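
/*
 * Typical pairing sketch (hypothetical open/close paths; "obj" and
 * "file_priv" are illustrative): grant access when user-space obtains a
 * handle for the object, revoke it when the handle is dropped. Each
 * drm_vma_node_allow() must be balanced by exactly one
 * drm_vma_node_revoke():
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);   // handle open
 *     ...
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);        // handle close
 */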

/**
 * drm_vma_node_allow_once - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * Unlike drm_vma_node_allow(), this is not ref-counted, so
 * drm_vma_node_revoke() should only be called once after this.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, false);
}
EXPORT_SYMBOL(drm_vma_node_allow_once);
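
/*
 * Example sketch ("obj" and "file_priv" are illustrative):
 * drm_vma_node_allow_once() fits when the same file may legitimately reach
 * this point multiple times but must be revoked exactly once at the end:
 *
 *     ret = drm_vma_node_allow_once(&obj->vma_node, file_priv);
 */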

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node whether @tag is currently on the list of allowed
 * open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
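
/*
 * Example access check, as an mmap() implementation might perform after
 * looking up the node for the requested offset ("priv" is the drm_file of
 * the calling open-file context):
 *
 *     if (!drm_vma_node_is_allowed(node, priv))
 *         return -EACCES;
 */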