/*
 *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 *  This driver needs a DirectFB counterpart in user space; communication
 *  is handled via mmap()ed memory areas and an ioctl.
 *
 *  Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 *  Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 *  Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * WARNING: This controller is attached to System Bus 2 of the PXA, which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you need to teach
 * your bootloader to do it for now.
 */
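
/*
 * For illustration only (not part of this driver): in bootloader code the
 * fix amounts to a single read-modify-write of the CKENB register named
 * above, along the lines of
 *
 *	CKENB |= 1 << 9;	(turn on the System Bus 2 arbiter)
 *
 * with CKENB resolved to whatever accessor the bootloader provides for
 * that register.
 */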

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"
#define MISCDEV_MINOR	197

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg)					\
	do {						\
		QPRINT(priv, KERN_DEBUG, msg);		\
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg)					\
	do {						\
		QPRINT(priv, KERN_ERR, msg);		\
	} while (0)

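/*
 * Each batch buffer handed in by user space travels through three singly
 * linked lists: "free" (available for the next write()), "ready" (queued,
 * but not yet picked up by the hardware) and "running" (currently linked
 * into the ring buffer). All three lists are protected by priv->spinlock.
 */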
struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32			*ptr;
	dma_addr_t		 phys;
	unsigned long		 length;
};

struct pxa3xx_gcu_priv {
	void __iomem		 *mmio_base;
	struct clk		 *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t		  shared_phys;
	struct resource		 *resource_mem;
	struct miscdevice	  misc_dev;
	struct file_operations	  misc_fops;
	wait_queue_head_t	  wait_idle;
	wait_queue_head_t	  wait_free;
	spinlock_t		  spinlock;
	struct timeval		  base_time;

	struct pxa3xx_gcu_batch *free;

	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

#define QPRINT(priv, level, msg)					\
	do {								\
		struct timeval tv;					\
		struct pxa3xx_gcu_shared *shared = priv->shared;	\
		u32 base = gc_readl(priv, REG_GCRBBR);			\
									\
		do_gettimeofday(&tv);					\
									\
		printk(level "%ld.%03ld.%03ld - %-17s: %-21s (%s, "	\
			"STATUS "					\
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, "	\
			"T %5ld)\n",					\
			tv.tv_sec - priv->base_time.tv_sec,		\
			tv.tv_usec / 1000, tv.tv_usec % 1000,		\
			__func__, msg,					\
			shared->hw_running ? "running" : "   idle",	\
			gc_readl(priv, REG_GCISCR),			\
			gc_readl(priv, REG_GCRBBR),			\
			gc_readl(priv, REG_GCRBLR),			\
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBTR) - base) / 4);	\
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	do_gettimeofday(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

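/*
 * Move all batch buffers that were handed to the hardware back onto the
 * free list. Called with priv->spinlock held.
 */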
static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

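/*
 * Rewrite the ring buffer so that it links all currently queued batch
 * buffers, move them from the "ready" to the "running" list and point the
 * hardware at the new ring. Called with priv->spinlock held and at least
 * one buffer on the "ready" list.
 */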
static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch	*ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}

static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by user space.
			 * Set hw_running = 0 and wait for the next
			 * user space kick-off. */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

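/*
 * The two wait helpers below use a timeout so that a hung GPU can be
 * detected: when wait_event_interruptible_timeout() expires, the ring
 * buffer execution head pointer (and, for the idle case, the interrupt
 * counter) is compared against its previous value to distinguish a real
 * hang (-ETIMEDOUT) from slow progress.
 */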
static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
						       priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

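/*
 * write() takes a batch of GCU commands from user space. The byte count is
 * interpreted as whole 32-bit words and must leave room for the batch
 * buffer end command appended here; each call consumes one buffer from the
 * free list, queues it on the ready list and returns the number of bytes
 * accepted.
 */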
static ssize_t
pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
		      size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch	*buffer;
	struct pxa3xx_gcu_priv *priv =
		container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);

	int words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;

	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer = priv->free;
	priv->free = buffer->next;

	spin_unlock_irqrestore(&priv->spinlock, flags);


	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}


static long
pxa3xx_gcu_misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv =
		container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

static int
pxa3xx_gcu_misc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv =
		container_of(filp->f_op, struct pxa3xx_gcu_priv, misc_fops);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(NULL, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_flags |= VM_IO;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}

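/*
 * Optional debug aid: when PXA3XX_GCU_DEBUG_TIMER is defined, a timer prints
 * a controller status line every five seconds and re-arms itself.
 */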
#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;

static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
{
	struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;

	QERROR("Timer DUMP");

	/* init the timer structure */
	init_timer(&pxa3xx_gcu_debug_timer);
	pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
	pxa3xx_gcu_debug_timer.data = ptr;
	pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* five seconds */

	add_timer(&pxa3xx_gcu_debug_timer);
}

static void pxa3xx_gcu_init_debug_timer(void)
{
	pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(void) {}
#endif

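/*
 * Helpers for the batch buffer free list: add_buffer() allocates one
 * DMA-coherent buffer of PXA3XX_GCU_BATCH_WORDS words and prepends it to
 * priv->free, free_buffers() releases the whole list again.
 */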
static int
add_buffer(struct platform_device *dev,
	   struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;

	priv->free = buffer;

	return 0;
}

static void
free_buffers(struct platform_device *dev,
	     struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(&dev->dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);

		buffer = next;
	}

	priv->free = NULL;
}

static int __devinit
pxa3xx_gcu_probe(struct platform_device *dev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;

	priv = kzalloc(sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	for (i = 0; i < 8; i++) {
		ret = add_buffer(dev, priv);
		if (ret) {
			dev_err(&dev->dev, "failed to allocate DMA memory\n");
			goto err_free_priv;
		}
	}

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_fops.owner	= THIS_MODULE;
	priv->misc_fops.write	= pxa3xx_gcu_misc_write;
	priv->misc_fops.unlocked_ioctl = pxa3xx_gcu_misc_ioctl;
	priv->misc_fops.mmap	= pxa3xx_gcu_misc_mmap;

	priv->misc_dev.minor	= MISCDEV_MINOR;
	priv->misc_dev.name	= DRV_NAME;
	priv->misc_dev.fops	= &priv->misc_fops;

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(&dev->dev, "misc_register() for minor %d failed\n",
			MISCDEV_MINOR);
		goto err_free_priv;
	}

	/* handle IO resources */
	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&dev->dev, "no I/O memory resource defined\n");
		ret = -ENODEV;
		goto err_misc_deregister;
	}

	if (!request_mem_region(r->start, resource_size(r), dev->name)) {
		dev_err(&dev->dev, "failed to request I/O memory\n");
		ret = -EBUSY;
		goto err_misc_deregister;
	}

	priv->mmio_base = ioremap_nocache(r->start, resource_size(r));
	if (!priv->mmio_base) {
		dev_err(&dev->dev, "failed to map I/O memory\n");
		ret = -EBUSY;
		goto err_free_mem_region;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(&dev->dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);

	if (!priv->shared) {
		dev_err(&dev->dev, "failed to allocate DMA memory\n");
		ret = -ENOMEM;
		goto err_free_io;
	}

	/* enable the clock */
	priv->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&dev->dev, "failed to get clock\n");
		ret = -ENODEV;
		goto err_free_dma;
	}

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(&dev->dev, "failed to enable clock\n");
		goto err_put_clk;
	}

	/* request the IRQ */
	irq = platform_get_irq(dev, 0);
	if (irq < 0) {
		dev_err(&dev->dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err_disable_clk;
	}

	ret = request_irq(irq, pxa3xx_gcu_handle_irq,
			  0, DRV_NAME, priv);
	if (ret) {
		dev_err(&dev->dev, "request_irq failed\n");
		ret = -EBUSY;
		goto err_disable_clk;
	}

	platform_set_drvdata(dev, priv);
	priv->resource_mem = r;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer();

	dev_info(&dev->dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_disable_clk:
	clk_disable(priv->clk);

err_put_clk:
	clk_put(priv->clk);

err_free_dma:
	dma_free_coherent(&dev->dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);

err_free_io:
	iounmap(priv->mmio_base);

err_free_mem_region:
	release_mem_region(r->start, resource_size(r));

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_free_priv:
	platform_set_drvdata(dev, NULL);
	free_buffers(dev, priv);
	kfree(priv);
	return ret;
}


static int __devexit
pxa3xx_gcu_remove(struct platform_device *dev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(dev);
	struct resource *r = priv->resource_mem;

	pxa3xx_gcu_wait_idle(priv);

	misc_deregister(&priv->misc_dev);
	dma_free_coherent(&dev->dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);
	iounmap(priv->mmio_base);
	release_mem_region(r->start, resource_size(r));
	platform_set_drvdata(dev, NULL);
	clk_disable(priv->clk);
	clk_put(priv->clk);
	free_buffers(dev, priv);
	kfree(priv);

	return 0;
}

static struct platform_driver pxa3xx_gcu_driver = {
	.probe	  = pxa3xx_gcu_probe,
	.remove	  = __devexit_p(pxa3xx_gcu_remove),
	.driver	  = {
		.owner  = THIS_MODULE,
		.name   = DRV_NAME,
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
		"Denis Oliver Kropp <dok@directfb.org>, "
		"Daniel Mack <daniel@caiaq.de>");