/*
 * drivers/staging/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA			0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If the buffer is allocated physically contiguous, the OMAP_BO_DMA
	 * flag is set and the paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are
	 * using the physical address and OMAP_BO_DMA is not set, then you
	 * should be going thru omap_gem_{get,put}_paddr() to ensure the
	 * mapping is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't guarantee that it is.  Use
	 * the OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
} *usergart;
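
/* Worked example of how the fields above hang together (the numbers are
 * illustrative, not actual TILER slot geometry): if the reserved region for
 * a format is one 4kb page wide and tiler_align() rounds it up to h = 64
 * rows, then height = 64 and height_shift = 6.  In fault_2d() below, a
 * fault at virtual page offset P of a tiled buffer is served from the slot
 * row starting at round_down(P, height), and that offset is converted back
 * into an index into the backing pages array with
 * (base_pgoff >> height_shift) * slots.
 */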

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		size_t size = PAGE_SIZE * usergart[fmt].height;
		loff_t off = mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non-
 * contiguous buffers can still be remapped in TILER/DMM if they need to be
 * contiguous.  To reduce pressure on TILER/DMM space we don't do that
 * remapping up front; we only allocate contiguous memory at creation time
 * when we know the buffer will need to be scanned out (and DMM is not
 * available to remap it later).
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(obj->dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		omap_obj->addrs = addrs;
	}

	omap_obj->pages = pages;
	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		kfree(omap_obj->addrs);
		omap_obj->addrs = NULL;
	}

	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	if (!obj->map_list.map) {
		/* Make it mmapable */
		size_t size = omap_gem_mmap_size(obj);
		int ret = _drm_gem_create_mmap_offset_size(obj, size);

		if (ret) {
			dev_err(obj->dev->dev, "could not allocate mmap offset");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}


/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	if (!usergart)
		return -EFAULT;

	/* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
	 * that are wider than 4kb
	 */

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/* actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, usergart[fmt].height);
	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
	entry = &usergart[fmt].entry[usergart[fmt].last];

	slots = omap_obj->width >> usergart[fmt].slot_shift;

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset:
	 */
	base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;

	/* Map in pages.  Note that the height of the slot is also the number
	 * of pages that need to be mapped in to fill a 4kb-wide region of the
	 * CPU-visible mapping: if the height is 64, then 64 pages fill a 4kb
	 * wide by 64 row region.  Beyond the valid pixel part of the buffer,
	 * we set pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to be
	 * mapped in, or cause other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (usergart[fmt].height - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	i = usergart[fmt].height;
	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	while (i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret) {
		goto fail;
	}

	/* where should we do the corresponding put_pages()?  We are mapping
	 * the original pages, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	/* after drm_gem_mmap(), it is safe to access the obj */
	omap_obj = to_omap_bo(vma->vm_private_data);

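	/*
	 * The fault handlers insert mappings with vm_insert_mixed(), which
	 * can handle both real struct pages (shmem-backed buffers) and raw
	 * pfns (physically contiguous buffers), so mark the vma VM_MIXEDMAP
	 * rather than VM_PFNMAP.
	 */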
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return ret;
}

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_destroy	-	destroy a dumb buffer
 * @file: client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

/**
 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 */
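/* Illustrative use (a sketch, not lifted from an in-tree caller): an fbdev
 * console panned down by 'lines' rows might call
 *
 *	omap_gem_roll(bo, (lines * pitch) >> PAGE_SHIFT);
 *
 * where 'lines * pitch' is the byte offset of the new top-of-screen; the
 * roll is expressed in pages of the backing buffer.
 */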
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
		/* this can get called from fbcon in atomic context.. so
		 * just ignore it and wait for next time called from
		 * interruptible context to update the PAT.. the result
		 * may be that user sees wrap-around instead of scrolling
		 * momentarily on the screen.  If we wanted to be fancier
		 * we could perhaps schedule some workqueue work at this
		 * point.
		 */
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
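/* Illustrative usage (a sketch, not lifted from an in-tree caller):
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		... program paddr into DSS/DMA registers ...
 *		omap_gem_put_paddr(obj);
 *	}
 *
 * The get/put calls are refcounted via paddr_cnt, so nested users are fine;
 * the TILER mapping is only torn down when the last user calls put.
 */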
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}
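
/* Worked example of the counter semantics (the numbers are illustrative):
 * two omap_gem_op_start(obj, OMAP_GEM_READ) calls followed by one
 * omap_gem_op_finish() leave read_pending == 2 and read_complete == 1, so
 * a waiter created at that point (read_target == 2) keeps waiting until
 * the second read retires.
 */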

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

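/* Illustrative flow (a sketch, not lifted from an in-tree caller): a hw
 * read of the buffer is bracketed with start/finish, and the CPU waits for
 * any outstanding write to retire before touching the pixels:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_READ);
 *	... kick hw, call omap_gem_op_finish(obj, OMAP_GEM_READ) when done ...
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_WRITE);
 *	... now safe for the CPU to read the buffer ...
 */
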
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter) {
			return -ENOMEM;
		}

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter) {
			kfree(waiter);
		}
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter) {
			return -ENOMEM;
		}

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;          /* unused */
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	if (obj->map_list.map) {
		drm_gem_free_mmap_offset(obj);
	}

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		}
		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		kfree(omap_obj->sync);
	}

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem page backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj) {
		dev_err(dev->dev, "could not allocate GEM object\n");
		goto fail;
	}

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr) {
			flags |= OMAP_BO_DMA;
		}
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		ret = drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}

	if (ret) {
		goto fail;
	}

	return obj;

fail:
	if (obj) {
		omap_gem_free_object(obj);
	}
	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j, ret;

	ret = omap_dmm_init(dev);
	if (ret) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
		return;
	}

	usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
	if (!usergart) {
		dev_warn(dev->dev, "could not allocate usergart\n");
		return;
	}

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	omap_dmm_remove();
	kfree(usergart);
}