1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 
13 #include <drm/drm_prime.h>
14 #include <drm/drm_file.h>
15 
16 #include <trace/events/gpu_mem.h>
17 
18 #include "msm_drv.h"
19 #include "msm_gem.h"
20 #include "msm_gpu.h"
21 #include "msm_kms.h"
22 
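/*
 * Memory accounting helpers: report the device-wide and per-context
 * totals through the gpu_mem_total tracepoint (a pid of 0 denotes the
 * whole-device total).
 */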
23 static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
24 {
25 	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
26 	trace_gpu_mem_total(0, 0, total_mem);
27 }
28 
29 static void update_ctx_mem(struct drm_file *file, ssize_t size)
30 {
31 	struct msm_context *ctx = file->driver_priv;
32 	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
33 
34 	rcu_read_lock(); /* Locks file->pid! */
35 	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
36 	rcu_read_unlock();
37 
38 }
39 
40 static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
41 {
42 	msm_gem_vma_get(obj);
43 	update_ctx_mem(file, obj->size);
44 	return 0;
45 }
46 
47 static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
48 			    bool close, const char *reason);
49 
50 static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
51 {
52 	struct msm_context *ctx = file->driver_priv;
53 	struct drm_exec exec;
54 
55 	update_ctx_mem(file, -obj->size);
56 	msm_gem_vma_put(obj);
57 
58 	/*
59 	 * If VM isn't created yet, nothing to cleanup.  And in fact calling
60 	 * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
61 	 * down the mappings of shared buffers in other contexts.
62 	 */
63 	if (!ctx->vm)
64 		return;
65 
66 	/*
67 	 * VM_BIND does not depend on implicit teardown of VMAs on handle
68 	 * close, but instead on implicit teardown of the VM when the device
69 	 * is closed (see msm_gem_vm_close())
70 	 */
71 	if (msm_context_is_vmbind(ctx))
72 		return;
73 
74 	/*
75 	 * TODO we might need to kick this to a queue to avoid blocking
76 	 * in CLOSE ioctl
77 	 */
78 	dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
79 			      MAX_SCHEDULE_TIMEOUT);
80 
81 	msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
82 	put_iova_spaces(obj, ctx->vm, true, "close");
83 	drm_exec_fini(&exec);     /* drop locks */
84 }
85 
86 /*
87  * Get/put for kms->vm VMA
88  */
89 
90 void msm_gem_vma_get(struct drm_gem_object *obj)
91 {
92 	atomic_inc(&to_msm_bo(obj)->vma_ref);
93 }
94 
95 void msm_gem_vma_put(struct drm_gem_object *obj)
96 {
97 	struct msm_drm_private *priv = obj->dev->dev_private;
98 	struct drm_exec exec;
99 
100 	if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
101 		return;
102 
103 	if (!priv->kms)
104 		return;
105 
106 	msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
107 	put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
108 	drm_exec_fini(&exec);     /* drop locks */
109 }
110 
111 /*
112  * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
113  * API.  Really GPU cache is out of scope here (handled on cmdstream)
114  * and all we need to do is invalidate newly allocated pages before
115  * mapping to CPU as uncached/writecombine.
116  *
117  * On top of this, we have the added headache, that depending on
118  * display generation, the display's iommu may be wired up to either
119  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
120  * that here we either have dma-direct or iommu ops.
121  *
122  * Let this be a cautionary tale of abstraction gone wrong.
123  */
124 
125 static void sync_for_device(struct msm_gem_object *msm_obj)
126 {
127 	struct device *dev = msm_obj->base.dev->dev;
128 
129 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
130 }
131 
132 static void sync_for_cpu(struct msm_gem_object *msm_obj)
133 {
134 	struct device *dev = msm_obj->base.dev->dev;
135 
136 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
137 }
138 
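/*
 * Move the object to the LRU list matching its current state: pinned
 * objects go on lru.pinned, unpinned-but-needed objects on lru.willneed,
 * and MSM_MADV_DONTNEED objects on lru.dontneed.  Caller must hold
 * priv->lru.lock.
 */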
139 static void update_lru_active(struct drm_gem_object *obj)
140 {
141 	struct msm_drm_private *priv = obj->dev->dev_private;
142 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
143 
144 	GEM_WARN_ON(!msm_obj->pages);
145 
146 	if (msm_obj->pin_count) {
147 		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
148 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
149 		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
150 	} else {
151 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
152 
153 		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
154 	}
155 }
156 
157 static void update_lru_locked(struct drm_gem_object *obj)
158 {
159 	struct msm_drm_private *priv = obj->dev->dev_private;
160 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
161 
162 	msm_gem_assert_locked(&msm_obj->base);
163 
164 	if (!msm_obj->pages) {
165 		GEM_WARN_ON(msm_obj->pin_count);
166 
167 		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
168 	} else {
169 		update_lru_active(obj);
170 	}
171 }
172 
173 static void update_lru(struct drm_gem_object *obj)
174 {
175 	struct msm_drm_private *priv = obj->dev->dev_private;
176 
177 	mutex_lock(&priv->lru.lock);
178 	update_lru_locked(obj);
179 	mutex_unlock(&priv->lru.lock);
180 }
181 
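/*
 * Allocate the shmem backing pages on first use, build the sg_table and,
 * for write-combined buffers, flush the new pages out of the CPU caches.
 * Called with the object lock held.
 */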
182 static struct page **get_pages(struct drm_gem_object *obj)
183 {
184 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
185 
186 	msm_gem_assert_locked(obj);
187 
188 	if (!msm_obj->pages) {
189 		struct drm_device *dev = obj->dev;
190 		struct page **p;
191 		int npages = obj->size >> PAGE_SHIFT;
192 
193 		p = drm_gem_get_pages(obj);
194 
195 		if (IS_ERR(p)) {
196 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
197 					PTR_ERR(p));
198 			return p;
199 		}
200 
201 		update_device_mem(dev->dev_private, obj->size);
202 
203 		msm_obj->pages = p;
204 
205 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
206 		if (IS_ERR(msm_obj->sgt)) {
207 			void *ptr = ERR_CAST(msm_obj->sgt);
208 
209 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
210 			msm_obj->sgt = NULL;
211 			return ptr;
212 		}
213 
214 		/* For non-cached buffers, ensure the new pages are clean
215 		 * because display controller, GPU, etc. are not coherent:
216 		 */
217 		if (msm_obj->flags & MSM_BO_WC)
218 			sync_for_device(msm_obj);
219 
220 		update_lru(obj);
221 	}
222 
223 	return msm_obj->pages;
224 }
225 
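/*
 * Release the sg_table and backing pages, and update the memory
 * accounting and LRU state accordingly.
 */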
226 static void put_pages(struct drm_gem_object *obj)
227 {
228 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
229 
230 	/*
231 	 * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
232 	 * See explanation in msm_gem_assert_locked()
233 	 */
234 	if (kref_read(&obj->refcount))
235 		drm_gpuvm_bo_gem_evict(obj, true);
236 
237 	if (msm_obj->pages) {
238 		if (msm_obj->sgt) {
239 			/* For non-cached buffers, ensure the new
240 			 * pages are clean because display controller,
241 			 * GPU, etc. are not coherent:
242 			 */
243 			if (msm_obj->flags & MSM_BO_WC)
244 				sync_for_cpu(msm_obj);
245 
246 			sg_free_table(msm_obj->sgt);
247 			kfree(msm_obj->sgt);
248 			msm_obj->sgt = NULL;
249 		}
250 
251 		update_device_mem(obj->dev->dev_private, -obj->size);
252 
253 		drm_gem_put_pages(obj, msm_obj->pages, true, false);
254 
255 		msm_obj->pages = NULL;
256 		update_lru(obj);
257 	}
258 }
259 
260 struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
261 {
262 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
263 
264 	msm_gem_assert_locked(obj);
265 
266 	if (msm_obj->madv > madv) {
267 		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
268 				     msm_obj->madv, madv);
269 		return ERR_PTR(-EBUSY);
270 	}
271 
272 	return get_pages(obj);
273 }
274 
275 /*
276  * Update the pin count of the object, call under lru.lock
277  */
278 void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
279 {
280 	struct msm_drm_private *priv = obj->dev->dev_private;
281 
282 	msm_gem_assert_locked(obj);
283 
284 	to_msm_bo(obj)->pin_count++;
285 	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
286 }
287 
288 static void pin_obj_locked(struct drm_gem_object *obj)
289 {
290 	struct msm_drm_private *priv = obj->dev->dev_private;
291 
292 	mutex_lock(&priv->lru.lock);
293 	msm_gem_pin_obj_locked(obj);
294 	mutex_unlock(&priv->lru.lock);
295 }
296 
297 struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
298 {
299 	struct page **p;
300 
301 	msm_gem_assert_locked(obj);
302 
303 	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
304 	if (!IS_ERR(p))
305 		pin_obj_locked(obj);
306 
307 	return p;
308 }
309 
310 void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
311 {
312 	msm_gem_assert_locked(obj);
313 
314 	msm_gem_unpin_locked(obj);
315 }
316 
317 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
318 {
319 	if (msm_obj->flags & MSM_BO_WC)
320 		return pgprot_writecombine(prot);
321 	return prot;
322 }
323 
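/*
 * CPU page fault handler for mmap'd objects: make sure backing pages
 * exist and insert the PFN for the faulting address.
 */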
324 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
325 {
326 	struct vm_area_struct *vma = vmf->vma;
327 	struct drm_gem_object *obj = vma->vm_private_data;
328 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
329 	struct page **pages;
330 	unsigned long pfn;
331 	pgoff_t pgoff;
332 	int err;
333 	vm_fault_t ret;
334 
335 	/*
336 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
337 	 * a reference on obj. So, we don't need to hold one here.
338 	 */
339 	err = msm_gem_lock_interruptible(obj);
340 	if (err) {
341 		ret = VM_FAULT_NOPAGE;
342 		goto out;
343 	}
344 
345 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
346 		msm_gem_unlock(obj);
347 		return VM_FAULT_SIGBUS;
348 	}
349 
350 	/* make sure we have pages attached now */
351 	pages = get_pages(obj);
352 	if (IS_ERR(pages)) {
353 		ret = vmf_error(PTR_ERR(pages));
354 		goto out_unlock;
355 	}
356 
357 	/* We don't use vmf->pgoff since that has the fake offset: */
358 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
359 
360 	pfn = page_to_pfn(pages[pgoff]);
361 
362 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
363 			pfn, pfn << PAGE_SHIFT);
364 
365 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
366 
367 out_unlock:
368 	msm_gem_unlock(obj);
369 out:
370 	return ret;
371 }
372 
373 /** get mmap offset */
374 static uint64_t mmap_offset(struct drm_gem_object *obj)
375 {
376 	struct drm_device *dev = obj->dev;
377 	int ret;
378 
379 	msm_gem_assert_locked(obj);
380 
381 	/* Make it mmapable */
382 	ret = drm_gem_create_mmap_offset(obj);
383 
384 	if (ret) {
385 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
386 		return 0;
387 	}
388 
389 	return drm_vma_node_offset_addr(&obj->vma_node);
390 }
391 
392 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
393 {
394 	uint64_t offset;
395 
396 	msm_gem_lock(obj);
397 	offset = mmap_offset(obj);
398 	msm_gem_unlock(obj);
399 	return offset;
400 }
401 
402 static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
403 				    struct drm_gpuvm *vm)
404 {
405 	struct drm_gpuvm_bo *vm_bo;
406 
407 	msm_gem_assert_locked(obj);
408 
409 	drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
410 		struct drm_gpuva *vma;
411 
412 		drm_gpuvm_bo_for_each_va (vma, vm_bo) {
413 			if (vma->vm == vm) {
414 				/* lookup_vma() should only be used in paths
415 				 * with at most one vma per vm
416 				 */
417 				GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
418 
419 				return vma;
420 			}
421 		}
422 	}
423 
424 	return NULL;
425 }
426 
427 /*
428  * If close is true, this also closes the VMA (releasing the allocated
429  * iova range) in addition to removing the iommu mapping.  In the eviction
430  * case (!close), we keep the iova allocated, but only remove the iommu
431  * mapping.
432  */
433 static void
434 put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
435 		bool close, const char *reason)
436 {
437 	struct drm_gpuvm_bo *vm_bo, *tmp;
438 
439 	msm_gem_assert_locked(obj);
440 
441 	drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
442 		struct drm_gpuva *vma, *vmatmp;
443 
444 		if (vm && vm_bo->vm != vm)
445 			continue;
446 
447 		drm_gpuvm_bo_get(vm_bo);
448 
449 		drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
450 			msm_gem_vma_unmap(vma, reason);
451 			if (close)
452 				msm_gem_vma_close(vma);
453 		}
454 
455 		drm_gpuvm_bo_put(vm_bo);
456 	}
457 }
458 
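/*
 * Return the object's VMA in the given VM, creating one within
 * [range_start, range_end) if it doesn't exist yet.
 */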
459 static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
460 					struct drm_gpuvm *vm, u64 range_start,
461 					u64 range_end)
462 {
463 	struct drm_gpuva *vma;
464 
465 	msm_gem_assert_locked(obj);
466 
467 	vma = lookup_vma(obj, vm);
468 
469 	if (!vma) {
470 		vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
471 	} else {
472 		GEM_WARN_ON(vma->va.addr < range_start);
473 		GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
474 	}
475 
476 	return vma;
477 }
478 
479 int msm_gem_prot(struct drm_gem_object *obj)
480 {
481 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
482 	int prot = IOMMU_READ;
483 
484 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
485 		prot |= IOMMU_WRITE;
486 
487 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
488 		prot |= IOMMU_PRIV;
489 
490 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
491 		prot |= IOMMU_CACHE;
492 
493 	return prot;
494 }
495 
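/*
 * Ensure backing pages and map the VMA into the IOMMU with the
 * protection flags derived from the object (see msm_gem_prot()).
 */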
496 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
497 {
498 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
499 	struct page **pages;
500 	int prot = msm_gem_prot(obj);
501 
502 	msm_gem_assert_locked(obj);
503 
504 	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
505 	if (IS_ERR(pages))
506 		return PTR_ERR(pages);
507 
508 	return msm_gem_vma_map(vma, prot, msm_obj->sgt);
509 }
510 
511 void msm_gem_unpin_locked(struct drm_gem_object *obj)
512 {
513 	struct msm_drm_private *priv = obj->dev->dev_private;
514 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
515 
516 	msm_gem_assert_locked(obj);
517 
518 	mutex_lock(&priv->lru.lock);
519 	msm_obj->pin_count--;
520 	GEM_WARN_ON(msm_obj->pin_count < 0);
521 	update_lru_locked(obj);
522 	mutex_unlock(&priv->lru.lock);
523 }
524 
525 /* Special unpin path for use in fence-signaling path, avoiding the need
526  * to hold the obj lock by only depending on things that are protected by
527  * the LRU lock.  In particular we know that we already have backing pages
528  * and that the object's dma_resv has the fence for the current
529  * submit/job which will prevent us racing against page eviction.
530  */
531 void msm_gem_unpin_active(struct drm_gem_object *obj)
532 {
533 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
534 
535 	msm_obj->pin_count--;
536 	GEM_WARN_ON(msm_obj->pin_count < 0);
537 	update_lru_active(obj);
538 }
539 
540 struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
541 					 struct drm_gpuvm *vm)
542 {
543 	return get_vma_locked(obj, vm, 0, U64_MAX);
544 }
545 
546 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
547 					 struct drm_gpuvm *vm, uint64_t *iova,
548 					 u64 range_start, u64 range_end)
549 {
550 	struct drm_gpuva *vma;
551 	int ret;
552 
553 	msm_gem_assert_locked(obj);
554 
555 	if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
556 		return -EINVAL;
557 
558 	vma = get_vma_locked(obj, vm, range_start, range_end);
559 	if (IS_ERR(vma))
560 		return PTR_ERR(vma);
561 
562 	ret = msm_gem_pin_vma_locked(obj, vma);
563 	if (!ret) {
564 		*iova = vma->va.addr;
565 		pin_obj_locked(obj);
566 	}
567 
568 	return ret;
569 }
570 
571 /*
572  * get iova and pin it. Should have a matching put
573  * limits iova to specified range (in pages)
574  */
575 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
576 				   struct drm_gpuvm *vm, uint64_t *iova,
577 				   u64 range_start, u64 range_end)
578 {
579 	struct drm_exec exec;
580 	int ret;
581 
582 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
583 	ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
584 	drm_exec_fini(&exec);     /* drop locks */
585 
586 	return ret;
587 }
588 
589 /* get iova and pin it. Should have a matching put */
590 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
591 			     uint64_t *iova)
592 {
593 	return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
594 }
595 
596 /*
597  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
598  * valid for the life of the object
599  */
600 int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
601 		     uint64_t *iova)
602 {
603 	struct drm_gpuva *vma;
604 	struct drm_exec exec;
605 	int ret = 0;
606 
607 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
608 	vma = get_vma_locked(obj, vm, 0, U64_MAX);
609 	if (IS_ERR(vma)) {
610 		ret = PTR_ERR(vma);
611 	} else {
612 		*iova = vma->va.addr;
613 	}
614 	drm_exec_fini(&exec);     /* drop locks */
615 
616 	return ret;
617 }
618 
619 static int clear_iova(struct drm_gem_object *obj,
620 		      struct drm_gpuvm *vm)
621 {
622 	struct drm_gpuva *vma = lookup_vma(obj, vm);
623 
624 	if (!vma)
625 		return 0;
626 
627 	msm_gem_vma_unmap(vma, NULL);
628 	msm_gem_vma_close(vma);
629 
630 	return 0;
631 }
632 
633 /*
634  * Get the requested iova but don't pin it.  Fails if the requested iova is
635  * not available.  Doesn't need a put because iovas are currently valid for
636  * the life of the object.
637  *
638  * Setting an iova of zero will clear the vma.
639  */
640 int msm_gem_set_iova(struct drm_gem_object *obj,
641 		     struct drm_gpuvm *vm, uint64_t iova)
642 {
643 	struct drm_exec exec;
644 	int ret = 0;
645 
646 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
647 	if (!iova) {
648 		ret = clear_iova(obj, vm);
649 	} else {
650 		struct drm_gpuva *vma;
651 		vma = get_vma_locked(obj, vm, iova, iova + obj->size);
652 		if (IS_ERR(vma)) {
653 			ret = PTR_ERR(vma);
654 		} else if (GEM_WARN_ON(vma->va.addr != iova)) {
655 			clear_iova(obj, vm);
656 			ret = -EBUSY;
657 		}
658 	}
659 	drm_exec_fini(&exec);     /* drop locks */
660 
661 	return ret;
662 }
663 
664 static bool is_kms_vm(struct drm_gpuvm *vm)
665 {
666 	struct msm_drm_private *priv = vm->drm->dev_private;
667 
668 	return priv->kms && (priv->kms->vm == vm);
669 }
670 
671 /*
672  * Unpin an iova by updating the reference counts. The memory isn't actually
673  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
674  * to get rid of it
675  */
676 void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
677 {
678 	struct drm_gpuva *vma;
679 	struct drm_exec exec;
680 
681 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
682 	vma = lookup_vma(obj, vm);
683 	if (vma) {
684 		msm_gem_unpin_locked(obj);
685 	}
686 	if (!is_kms_vm(vm))
687 		put_iova_spaces(obj, vm, true, "close");
688 	drm_exec_fini(&exec);     /* drop locks */
689 }
690 
691 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
692 		struct drm_mode_create_dumb *args)
693 {
694 	args->pitch = align_pitch(args->width, args->bpp);
695 	args->size  = PAGE_ALIGN(args->pitch * args->height);
696 	return msm_gem_new_handle(dev, file, args->size,
697 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
698 }
699 
700 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
701 		uint32_t handle, uint64_t *offset)
702 {
703 	struct drm_gem_object *obj;
704 	int ret = 0;
705 
706 	/* GEM does all our handle to object mapping */
707 	obj = drm_gem_object_lookup(file, handle);
708 	if (obj == NULL) {
709 		ret = -ENOENT;
710 		goto fail;
711 	}
712 
713 	*offset = msm_gem_mmap_offset(obj);
714 
715 	drm_gem_object_put(obj);
716 
717 fail:
718 	return ret;
719 }
720 
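/*
 * Pin the backing pages and return a kernel vmap() of the object.
 * Not supported for imported dma-bufs.
 */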
721 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
722 {
723 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
724 	struct page **pages;
725 	int ret = 0;
726 
727 	msm_gem_assert_locked(obj);
728 
729 	if (drm_gem_is_imported(obj))
730 		return ERR_PTR(-ENODEV);
731 
732 	pages = msm_gem_get_pages_locked(obj, madv);
733 	if (IS_ERR(pages))
734 		return ERR_CAST(pages);
735 
736 	pin_obj_locked(obj);
737 
738 	/* increment vmap_count *before* vmap() call, so shrinker can
739 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
740 	 * This guarantees that we won't try to msm_gem_vunmap() this
741 	 * same object from within the vmap() call (while we already
742 	 * hold msm_obj lock)
743 	 */
744 	msm_obj->vmap_count++;
745 
746 	if (!msm_obj->vaddr) {
747 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
748 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
749 		if (msm_obj->vaddr == NULL) {
750 			ret = -ENOMEM;
751 			goto fail;
752 		}
753 	}
754 
755 	return msm_obj->vaddr;
756 
757 fail:
758 	msm_obj->vmap_count--;
759 	msm_gem_unpin_locked(obj);
760 	return ERR_PTR(ret);
761 }
762 
763 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
764 {
765 	return get_vaddr(obj, MSM_MADV_WILLNEED);
766 }
767 
768 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
769 {
770 	void *ret;
771 
772 	msm_gem_lock(obj);
773 	ret = msm_gem_get_vaddr_locked(obj);
774 	msm_gem_unlock(obj);
775 
776 	return ret;
777 }
778 
779 /*
780  * Don't use this!  It is for the very special case of dumping
781  * submits from GPU hangs or faults, where the bo may already
782  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
783  * active list.
784  */
785 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
786 {
787 	return get_vaddr(obj, __MSM_MADV_PURGED);
788 }
789 
790 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
791 {
792 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
793 
794 	msm_gem_assert_locked(obj);
795 	GEM_WARN_ON(msm_obj->vmap_count < 1);
796 
797 	msm_obj->vmap_count--;
798 	msm_gem_unpin_locked(obj);
799 }
800 
801 void msm_gem_put_vaddr(struct drm_gem_object *obj)
802 {
803 	msm_gem_lock(obj);
804 	msm_gem_put_vaddr_locked(obj);
805 	msm_gem_unlock(obj);
806 }
807 
808 /* Update madvise status, returns true if the object has not been
809  * purged, else false.
810  */
811 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
812 {
813 	struct msm_drm_private *priv = obj->dev->dev_private;
814 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
815 
816 	msm_gem_lock(obj);
817 
818 	mutex_lock(&priv->lru.lock);
819 
820 	if (msm_obj->madv != __MSM_MADV_PURGED)
821 		msm_obj->madv = madv;
822 
823 	madv = msm_obj->madv;
824 
825 	/* If the obj is inactive, we might need to move it
826 	 * between inactive lists
827 	 */
828 	update_lru_locked(obj);
829 
830 	mutex_unlock(&priv->lru.lock);
831 
832 	msm_gem_unlock(obj);
833 
834 	return (madv != __MSM_MADV_PURGED);
835 }
836 
837 void msm_gem_purge(struct drm_gem_object *obj)
838 {
839 	struct drm_device *dev = obj->dev;
840 	struct msm_drm_private *priv = obj->dev->dev_private;
841 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
842 
843 	msm_gem_assert_locked(obj);
844 	GEM_WARN_ON(!is_purgeable(msm_obj));
845 
846 	/* Get rid of any iommu mapping(s): */
847 	put_iova_spaces(obj, NULL, false, "purge");
848 
849 	msm_gem_vunmap(obj);
850 
851 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
852 
853 	put_pages(obj);
854 
855 	mutex_lock(&priv->lru.lock);
856 	/* A one-way transition: */
857 	msm_obj->madv = __MSM_MADV_PURGED;
858 	mutex_unlock(&priv->lru.lock);
859 
860 	drm_gem_free_mmap_offset(obj);
861 
862 	/* Our goal here is to return as much of the memory as possible
863 	 * back to the system, as we are called from OOM.
864 	 * To do this we must instruct the shmfs to drop all of its
865 	 * backing pages, *now*.
866 	 */
867 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
868 
869 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
870 			0, (loff_t)-1);
871 }
872 
873 /*
874  * Unpin the backing pages and make them available to be swapped out.
875  */
876 void msm_gem_evict(struct drm_gem_object *obj)
877 {
878 	struct drm_device *dev = obj->dev;
879 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
880 
881 	msm_gem_assert_locked(obj);
882 	GEM_WARN_ON(is_unevictable(msm_obj));
883 
884 	/* Get rid of any iommu mapping(s): */
885 	put_iova_spaces(obj, NULL, false, "evict");
886 
887 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
888 
889 	put_pages(obj);
890 }
891 
892 void msm_gem_vunmap(struct drm_gem_object *obj)
893 {
894 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
895 
896 	msm_gem_assert_locked(obj);
897 
898 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
899 		return;
900 
901 	vunmap(msm_obj->vaddr);
902 	msm_obj->vaddr = NULL;
903 }
904 
905 bool msm_gem_active(struct drm_gem_object *obj)
906 {
907 	msm_gem_assert_locked(obj);
908 
909 	if (to_msm_bo(obj)->pin_count)
910 		return true;
911 
912 	return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
913 }
914 
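/*
 * Wait for implicit fences before CPU access.  MSM_PREP_NOSYNC turns this
 * into a poll, and MSM_PREP_BOOST bumps the fence deadline first.
 */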
915 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
916 {
917 	bool write = !!(op & MSM_PREP_WRITE);
918 	unsigned long remain =
919 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
920 	long ret;
921 
922 	if (op & MSM_PREP_BOOST) {
923 		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
924 				      ktime_get());
925 	}
926 
927 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
928 				    true,  remain);
929 	if (ret == 0)
930 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
931 	else if (ret < 0)
932 		return ret;
933 
934 	/* TODO cache maintenance */
935 
936 	return 0;
937 }
938 
939 int msm_gem_cpu_fini(struct drm_gem_object *obj)
940 {
941 	/* TODO cache maintenance */
942 	return 0;
943 }
944 
945 #ifdef CONFIG_DEBUG_FS
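/*
 * Dump the state of a single object into the debugfs output, accumulating
 * per-category totals in @stats.
 */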
946 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
947 		struct msm_gem_stats *stats)
948 {
949 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
950 	struct dma_resv *robj = obj->resv;
951 	uint64_t off = drm_vma_node_start(&obj->vma_node);
952 	const char *madv;
953 
954 	if (!msm_gem_trylock(obj))
955 		return;
956 
957 	stats->all.count++;
958 	stats->all.size += obj->size;
959 
960 	if (msm_gem_active(obj)) {
961 		stats->active.count++;
962 		stats->active.size += obj->size;
963 	}
964 
965 	if (msm_obj->pages) {
966 		stats->resident.count++;
967 		stats->resident.size += obj->size;
968 	}
969 
970 	switch (msm_obj->madv) {
971 	case __MSM_MADV_PURGED:
972 		stats->purged.count++;
973 		stats->purged.size += obj->size;
974 		madv = " purged";
975 		break;
976 	case MSM_MADV_DONTNEED:
977 		stats->purgeable.count++;
978 		stats->purgeable.size += obj->size;
979 		madv = " purgeable";
980 		break;
981 	case MSM_MADV_WILLNEED:
982 	default:
983 		madv = "";
984 		break;
985 	}
986 
987 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
988 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
989 			obj->name, kref_read(&obj->refcount),
990 			off, msm_obj->vaddr);
991 
992 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
993 
994 	if (!list_empty(&obj->gpuva.list)) {
995 		struct drm_gpuvm_bo *vm_bo;
996 
997 		seq_puts(m, "      vmas:");
998 
999 		drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1000 			struct drm_gpuva *vma;
1001 
1002 			drm_gpuvm_bo_for_each_va (vma, vm_bo) {
1003 				const char *name, *comm;
1004 				struct msm_gem_vm *vm = to_msm_vm(vma->vm);
1005 				struct task_struct *task =
1006 					get_pid_task(vm->pid, PIDTYPE_PID);
1007 				if (task) {
1008 					comm = kstrdup(task->comm, GFP_KERNEL);
1009 					put_task_struct(task);
1010 				} else {
1011 					comm = NULL;
1012 				}
1013 				name = vm->base.name;
1014 
1015 				seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
1016 					   name, comm ? ":" : "", comm ? comm : "",
1017 					   vma->vm, vma->va.addr,
1018 					   to_msm_vma(vma)->mapped ? "" : "un");
1019 				kfree(comm);
1020 			}
1021 		}
1022 
1023 		seq_puts(m, "\n");
1024 	}
1025 
1026 	dma_resv_describe(robj, m);
1027 	msm_gem_unlock(obj);
1028 }
1029 
1030 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1031 {
1032 	struct msm_gem_stats stats = {};
1033 	struct msm_gem_object *msm_obj;
1034 
1035 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1036 	list_for_each_entry(msm_obj, list, node) {
1037 		struct drm_gem_object *obj = &msm_obj->base;
1038 		seq_puts(m, "   ");
1039 		msm_gem_describe(obj, m, &stats);
1040 	}
1041 
1042 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1043 			stats.all.count, stats.all.size);
1044 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1045 			stats.active.count, stats.active.size);
1046 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1047 			stats.resident.count, stats.resident.size);
1048 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1049 			stats.purgeable.count, stats.purgeable.size);
1050 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1051 			stats.purged.count, stats.purged.size);
1052 }
1053 #endif
1054 
1055 /* don't call directly!  Use drm_gem_object_put() */
1056 static void msm_gem_free_object(struct drm_gem_object *obj)
1057 {
1058 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1059 	struct drm_device *dev = obj->dev;
1060 	struct msm_drm_private *priv = dev->dev_private;
1061 	struct drm_exec exec;
1062 
1063 	mutex_lock(&priv->obj_lock);
1064 	list_del(&msm_obj->node);
1065 	mutex_unlock(&priv->obj_lock);
1066 
1067 	/*
1068 	 * We need to lock any VMs the object is still attached to, but not
1069 	 * the object itself (see explanation in msm_gem_assert_locked()),
1070 	 * so just open-code this special case.
1071 	 *
1072 	 * Note that we skip the dance if we aren't attached to any VM.  This
1073 	 * is load bearing.  The driver needs to support two usage models:
1074 	 *
1075 	 * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
1076 	 *    implicitly torn down when the object is freed, the VMA's do
1077 	 *    not hold a hard reference to the BO.
1078 	 *
1079 	 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
1080 	 *    BO.  This can be dropped when the VM is closed and its associated
1081 	 *    VMAs are torn down.  (See msm_gem_vm_close()).
1082 	 *
1083 	 * In the latter case the last reference to a BO can be dropped while
1084 	 * we already have the VM locked.  It would have already been removed
1085 	 * from the gpuva list, but lockdep doesn't know that, nor does it
1086 	 * understand the differences between the two usage models.
1087 	 */
1088 	if (!list_empty(&obj->gpuva.list)) {
1089 		drm_exec_init(&exec, 0, 0);
1090 		drm_exec_until_all_locked (&exec) {
1091 			struct drm_gpuvm_bo *vm_bo;
1092 			drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1093 				drm_exec_lock_obj(&exec,
1094 						  drm_gpuvm_resv_obj(vm_bo->vm));
1095 				drm_exec_retry_on_contention(&exec);
1096 			}
1097 		}
1098 		put_iova_spaces(obj, NULL, true, "free");
1099 		drm_exec_fini(&exec);     /* drop locks */
1100 	}
1101 
1102 	if (drm_gem_is_imported(obj)) {
1103 		GEM_WARN_ON(msm_obj->vaddr);
1104 
1105 		/* Don't drop the pages for imported dmabuf, as they are not
1106 		 * ours, just free the array we allocated:
1107 		 */
1108 		kvfree(msm_obj->pages);
1109 
1110 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1111 	} else {
1112 		msm_gem_vunmap(obj);
1113 		put_pages(obj);
1114 	}
1115 
1116 	if (msm_obj->flags & MSM_BO_NO_SHARE) {
1117 		struct drm_gem_object *r_obj =
1118 			container_of(obj->resv, struct drm_gem_object, _resv);
1119 
1120 		/* Drop reference we hold to shared resv obj: */
1121 		drm_gem_object_put(r_obj);
1122 	}
1123 
1124 	drm_gem_object_release(obj);
1125 
1126 	kfree(msm_obj->metadata);
1127 	kfree(msm_obj);
1128 }
1129 
1130 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1131 {
1132 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1133 
1134 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1135 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1136 
1137 	return 0;
1138 }
1139 
1140 /* convenience method to construct a GEM buffer object, and userspace handle */
1141 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1142 		uint32_t size, uint32_t flags, uint32_t *handle,
1143 		char *name)
1144 {
1145 	struct drm_gem_object *obj;
1146 	int ret;
1147 
1148 	obj = msm_gem_new(dev, size, flags);
1149 
1150 	if (IS_ERR(obj))
1151 		return PTR_ERR(obj);
1152 
1153 	if (name)
1154 		msm_gem_object_set_name(obj, "%s", name);
1155 
1156 	if (flags & MSM_BO_NO_SHARE) {
1157 		struct msm_context *ctx = file->driver_priv;
1158 		struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
1159 
1160 		drm_gem_object_get(r_obj);
1161 
1162 		obj->resv = r_obj->resv;
1163 	}
1164 
1165 	ret = drm_gem_handle_create(file, obj, handle);
1166 
1167 	/* drop reference from allocate - handle holds it now */
1168 	drm_gem_object_put(obj);
1169 
1170 	return ret;
1171 }
1172 
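/*
 * Report whether the object is resident and/or purgeable; used by the
 * DRM core when accounting memory statistics.
 */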
1173 static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1174 {
1175 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1176 	enum drm_gem_object_status status = 0;
1177 
1178 	if (msm_obj->pages)
1179 		status |= DRM_GEM_OBJECT_RESIDENT;
1180 
1181 	if (msm_obj->madv == MSM_MADV_DONTNEED)
1182 		status |= DRM_GEM_OBJECT_PURGEABLE;
1183 
1184 	return status;
1185 }
1186 
1187 static const struct vm_operations_struct vm_ops = {
1188 	.fault = msm_gem_fault,
1189 	.open = drm_gem_vm_open,
1190 	.close = drm_gem_vm_close,
1191 };
1192 
1193 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1194 	.free = msm_gem_free_object,
1195 	.open = msm_gem_open,
1196 	.close = msm_gem_close,
1197 	.export = msm_gem_prime_export,
1198 	.pin = msm_gem_prime_pin,
1199 	.unpin = msm_gem_prime_unpin,
1200 	.get_sg_table = msm_gem_prime_get_sg_table,
1201 	.vmap = msm_gem_prime_vmap,
1202 	.vunmap = msm_gem_prime_vunmap,
1203 	.mmap = msm_gem_object_mmap,
1204 	.status = msm_gem_status,
1205 	.vm_ops = &vm_ops,
1206 };
1207 
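/*
 * Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validate the caching flags and set up the msm_gem_object.
 */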
1208 static int msm_gem_new_impl(struct drm_device *dev,
1209 		uint32_t size, uint32_t flags,
1210 		struct drm_gem_object **obj)
1211 {
1212 	struct msm_drm_private *priv = dev->dev_private;
1213 	struct msm_gem_object *msm_obj;
1214 
1215 	switch (flags & MSM_BO_CACHE_MASK) {
1216 	case MSM_BO_CACHED:
1217 	case MSM_BO_WC:
1218 		break;
1219 	case MSM_BO_CACHED_COHERENT:
1220 		if (priv->has_cached_coherent)
1221 			break;
1222 		fallthrough;
1223 	default:
1224 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1225 				(flags & MSM_BO_CACHE_MASK));
1226 		return -EINVAL;
1227 	}
1228 
1229 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1230 	if (!msm_obj)
1231 		return -ENOMEM;
1232 
1233 	msm_obj->flags = flags;
1234 	msm_obj->madv = MSM_MADV_WILLNEED;
1235 
1236 	INIT_LIST_HEAD(&msm_obj->node);
1237 
1238 	*obj = &msm_obj->base;
1239 	(*obj)->funcs = &msm_gem_object_funcs;
1240 
1241 	return 0;
1242 }
1243 
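/*
 * Allocate a shmem-backed GEM object.  Backing pages are allocated
 * lazily, on first use (see get_pages()).
 */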
1244 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1245 {
1246 	struct msm_drm_private *priv = dev->dev_private;
1247 	struct msm_gem_object *msm_obj;
1248 	struct drm_gem_object *obj = NULL;
1249 	int ret;
1250 
1251 	size = PAGE_ALIGN(size);
1252 
1253 	/* Disallow zero sized objects as they make the underlying
1254 	 * infrastructure grumpy
1255 	 */
1256 	if (size == 0)
1257 		return ERR_PTR(-EINVAL);
1258 
1259 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1260 	if (ret)
1261 		return ERR_PTR(ret);
1262 
1263 	msm_obj = to_msm_bo(obj);
1264 
1265 	ret = drm_gem_object_init(dev, obj, size);
1266 	if (ret)
1267 		goto fail;
1268 	/*
1269 	 * Our buffers are kept pinned, so allocating them from the
1270 	 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1271 	 * See the comments above new_inode() for why this is required _and_
1272 	 * expected if you're going to pin these pages.
1273 	 */
1274 	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1275 
1276 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1277 
1278 	mutex_lock(&priv->obj_lock);
1279 	list_add_tail(&msm_obj->node, &priv->objects);
1280 	mutex_unlock(&priv->obj_lock);
1281 
1282 	ret = drm_gem_create_mmap_offset(obj);
1283 	if (ret)
1284 		goto fail;
1285 
1286 	return obj;
1287 
1288 fail:
1289 	drm_gem_object_put(obj);
1290 	return ERR_PTR(ret);
1291 }
1292 
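/*
 * Wrap an imported dma-buf in a GEM object.  The page array is built from
 * the provided sg_table, and the object starts out on the pinned LRU.
 */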
1293 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1294 		struct dma_buf *dmabuf, struct sg_table *sgt)
1295 {
1296 	struct msm_drm_private *priv = dev->dev_private;
1297 	struct msm_gem_object *msm_obj;
1298 	struct drm_gem_object *obj;
1299 	uint32_t size;
1300 	int ret, npages;
1301 
1302 	size = PAGE_ALIGN(dmabuf->size);
1303 
1304 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1305 	if (ret)
1306 		return ERR_PTR(ret);
1307 
1308 	drm_gem_private_object_init(dev, obj, size);
1309 
1310 	npages = size / PAGE_SIZE;
1311 
1312 	msm_obj = to_msm_bo(obj);
1313 	msm_gem_lock(obj);
1314 	msm_obj->sgt = sgt;
1315 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1316 	if (!msm_obj->pages) {
1317 		msm_gem_unlock(obj);
1318 		ret = -ENOMEM;
1319 		goto fail;
1320 	}
1321 
1322 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1323 	if (ret) {
1324 		msm_gem_unlock(obj);
1325 		goto fail;
1326 	}
1327 
1328 	msm_gem_unlock(obj);
1329 
1330 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1331 
1332 	mutex_lock(&priv->obj_lock);
1333 	list_add_tail(&msm_obj->node, &priv->objects);
1334 	mutex_unlock(&priv->obj_lock);
1335 
1336 	ret = drm_gem_create_mmap_offset(obj);
1337 	if (ret)
1338 		goto fail;
1339 
1340 	return obj;
1341 
1342 fail:
1343 	drm_gem_object_put(obj);
1344 	return ERR_PTR(ret);
1345 }
1346 
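/*
 * Allocate a kernel-internal buffer, optionally pinning it at an iova in
 * @vm, and return a kernel mapping.  A minimal usage sketch (variable
 * names here are illustrative, not taken from an actual caller):
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, vm, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, vm);
 */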
1347 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
1348 			 struct drm_gpuvm *vm, struct drm_gem_object **bo,
1349 			 uint64_t *iova)
1350 {
1351 	void *vaddr;
1352 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1353 	int ret;
1354 
1355 	if (IS_ERR(obj))
1356 		return ERR_CAST(obj);
1357 
1358 	if (iova) {
1359 		ret = msm_gem_get_and_pin_iova(obj, vm, iova);
1360 		if (ret)
1361 			goto err;
1362 	}
1363 
1364 	vaddr = msm_gem_get_vaddr(obj);
1365 	if (IS_ERR(vaddr)) {
1366 		msm_gem_unpin_iova(obj, vm);
1367 		ret = PTR_ERR(vaddr);
1368 		goto err;
1369 	}
1370 
1371 	if (bo)
1372 		*bo = obj;
1373 
1374 	return vaddr;
1375 err:
1376 	drm_gem_object_put(obj);
1377 
1378 	return ERR_PTR(ret);
1379 
1380 }
1381 
1382 void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
1383 {
1384 	if (IS_ERR_OR_NULL(bo))
1385 		return;
1386 
1387 	msm_gem_put_vaddr(bo);
1388 	msm_gem_unpin_iova(bo, vm);
1389 	drm_gem_object_put(bo);
1390 }
1391 
1392 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1393 {
1394 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1395 	va_list ap;
1396 
1397 	if (!fmt)
1398 		return;
1399 
1400 	va_start(ap, fmt);
1401 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1402 	va_end(ap);
1403 }
1404