1 /*****************************************************************************
2 * Copyright 2004 - 2008 Broadcom Corporation.  All rights reserved.
3 *
4 * Unless you and Broadcom execute a separate written software license
5 * agreement governing use of this software, this software is licensed to you
6 * under the terms of the GNU General Public License version 2, available at
7 * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8 *
9 * Notwithstanding the above, under no circumstances may you combine this
10 * software in any way with any other Broadcom software provided under a
11 * license other than the GPL, without Broadcom's express prior written
12 * consent.
13 *****************************************************************************/
14 
15 /****************************************************************************/
16 /**
17 *   @file   dma.c
18 *
19 *   @brief  Implements the DMA interface.
20 */
21 /****************************************************************************/
22 
23 /* ---- Include Files ---------------------------------------------------- */
24 
25 #include <linux/module.h>
26 #include <linux/device.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/irqreturn.h>
31 #include <linux/proc_fs.h>
32 #include <linux/slab.h>
33 
34 #include <mach/timer.h>
35 
36 #include <linux/pfn.h>
37 #include <linux/atomic.h>
38 #include <linux/sched.h>
39 #include <mach/dma.h>
40 
41 /* ---- Public Variables ------------------------------------------------- */
42 
43 /* ---- Private Constants and Types -------------------------------------- */
44 
45 #define MAKE_HANDLE(controllerIdx, channelIdx)    (((controllerIdx) << 4) | (channelIdx))
46 
47 #define CONTROLLER_FROM_HANDLE(handle)    (((handle) >> 4) & 0x0f)
48 #define CHANNEL_FROM_HANDLE(handle)       ((handle) & 0x0f)
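
/*
* Example (illustrative only): a handle simply packs the controller index
* into the upper nibble and the channel index into the lower nibble, so
* MAKE_HANDLE(1, 3) yields 0x13, CONTROLLER_FROM_HANDLE(0x13) == 1 and
* CHANNEL_FROM_HANDLE(0x13) == 3.
*/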
49 
50 
51 /* ---- Private Variables ------------------------------------------------ */
52 
53 static DMA_Global_t gDMA;
54 static struct proc_dir_entry *gDmaDir;
55 
56 #include "dma_device.c"
57 
58 /* ---- Private Function Prototypes -------------------------------------- */
59 
60 /* ---- Functions  ------------------------------------------------------- */
61 
62 /****************************************************************************/
63 /**
64 *   Displays information for /proc/dma/channels
65 */
66 /****************************************************************************/
67 
static int dma_proc_read_channels(char *buf, char **start, off_t offset,
				  int count, int *eof, void *data)
70 {
71 	int controllerIdx;
72 	int channelIdx;
73 	int limit = count - 200;
74 	int len = 0;
75 	DMA_Channel_t *channel;
76 
77 	if (down_interruptible(&gDMA.lock) < 0) {
78 		return -ERESTARTSYS;
79 	}
80 
81 	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
82 	     controllerIdx++) {
83 		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
84 		     channelIdx++) {
85 			if (len >= limit) {
86 				break;
87 			}
88 
89 			channel =
90 			    &gDMA.controller[controllerIdx].channel[channelIdx];
91 
92 			len +=
93 			    sprintf(buf + len, "%d:%d ", controllerIdx,
94 				    channelIdx);
95 
96 			if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
97 			    0) {
98 				len +=
99 				    sprintf(buf + len, "Dedicated for %s ",
100 					    DMA_gDeviceAttribute[channel->
101 								 devType].name);
102 			} else {
103 				len += sprintf(buf + len, "Shared ");
104 			}
105 
106 			if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
107 				len += sprintf(buf + len, "No ISR ");
108 			}
109 
110 			if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
111 				len += sprintf(buf + len, "Fifo: 128 ");
112 			} else {
113 				len += sprintf(buf + len, "Fifo: 64  ");
114 			}
115 
116 			if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
117 				len +=
118 				    sprintf(buf + len, "InUse by %s",
119 					    DMA_gDeviceAttribute[channel->
120 								 devType].name);
121 #if (DMA_DEBUG_TRACK_RESERVATION)
122 				len +=
123 				    sprintf(buf + len, " (%s:%d)",
124 					    channel->fileName,
125 					    channel->lineNum);
126 #endif
127 			} else {
128 				len += sprintf(buf + len, "Avail ");
129 			}
130 
131 			if (channel->lastDevType != DMA_DEVICE_NONE) {
132 				len +=
133 				    sprintf(buf + len, "Last use: %s ",
134 					    DMA_gDeviceAttribute[channel->
135 								 lastDevType].
136 					    name);
137 			}
138 
139 			len += sprintf(buf + len, "\n");
140 		}
141 	}
142 	up(&gDMA.lock);
143 	*eof = 1;
144 
145 	return len;
146 }
147 
148 /****************************************************************************/
149 /**
150 *   Displays information for /proc/dma/devices
151 */
152 /****************************************************************************/
153 
static int dma_proc_read_devices(char *buf, char **start, off_t offset,
				 int count, int *eof, void *data)
156 {
157 	int limit = count - 200;
158 	int len = 0;
159 	int devIdx;
160 
161 	if (down_interruptible(&gDMA.lock) < 0) {
162 		return -ERESTARTSYS;
163 	}
164 
165 	for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
166 		DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];
167 
168 		if (devAttr->name == NULL) {
169 			continue;
170 		}
171 
172 		if (len >= limit) {
173 			break;
174 		}
175 
176 		len += sprintf(buf + len, "%-12s ", devAttr->name);
177 
178 		if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
179 			len +=
180 			    sprintf(buf + len, "Dedicated %d:%d ",
181 				    devAttr->dedicatedController,
182 				    devAttr->dedicatedChannel);
183 		} else {
184 			len += sprintf(buf + len, "Shared DMA:");
185 			if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) {
186 				len += sprintf(buf + len, "0");
187 			}
188 			if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) {
189 				len += sprintf(buf + len, "1");
190 			}
191 			len += sprintf(buf + len, " ");
192 		}
193 		if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) {
194 			len += sprintf(buf + len, "NoISR ");
195 		}
196 		if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) {
197 			len += sprintf(buf + len, "Allow-128 ");
198 		}
199 
200 		len +=
201 		    sprintf(buf + len,
202 			    "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n",
203 			    devAttr->numTransfers, devAttr->transferTicks,
204 			    devAttr->transferBytes,
205 			    devAttr->ring.bytesAllocated);
206 
207 	}
208 
209 	up(&gDMA.lock);
210 	*eof = 1;
211 
212 	return len;
213 }
214 
215 /****************************************************************************/
216 /**
217 *   Determines if a DMA_Device_t is "valid".
218 *
219 *   @return
220 *       TRUE        - dma device is valid
221 *       FALSE       - dma device isn't valid
222 */
223 /****************************************************************************/
224 
static inline int IsDeviceValid(DMA_Device_t device)
226 {
227 	return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES);
228 }
229 
230 /****************************************************************************/
231 /**
232 *   Translates a DMA handle into a pointer to a channel.
233 *
234 *   @return
235 *       non-NULL    - pointer to DMA_Channel_t
236 *       NULL        - DMA Handle was invalid
237 */
238 /****************************************************************************/
239 
static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle)
241 {
242 	int controllerIdx;
243 	int channelIdx;
244 
245 	controllerIdx = CONTROLLER_FROM_HANDLE(handle);
246 	channelIdx = CHANNEL_FROM_HANDLE(handle);
247 
	if ((controllerIdx >= DMA_NUM_CONTROLLERS)
	    || (channelIdx >= DMA_NUM_CHANNELS)) {
250 		return NULL;
251 	}
252 	return &gDMA.controller[controllerIdx].channel[channelIdx];
253 }
254 
255 /****************************************************************************/
256 /**
257 *   Interrupt handler which is called to process DMA interrupts.
258 */
259 /****************************************************************************/
260 
static irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
262 {
263 	DMA_Channel_t *channel;
264 	DMA_DeviceAttribute_t *devAttr;
265 	int irqStatus;
266 
267 	channel = (DMA_Channel_t *) dev_id;
268 
269 	/* Figure out why we were called, and knock down the interrupt */
270 
271 	irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle);
272 	dmacHw_clearInterrupt(channel->dmacHwHandle);
273 
	if ((channel->devType < 0)
	    || (channel->devType >= DMA_NUM_DEVICE_ENTRIES)) {
276 		printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n",
277 		       channel->devType);
278 		return IRQ_NONE;
279 	}
280 	devAttr = &DMA_gDeviceAttribute[channel->devType];
281 
282 	/* Update stats */
283 
284 	if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) {
285 		devAttr->transferTicks +=
286 		    (timer_get_tick_count() - devAttr->transferStartTime);
287 	}
288 
289 	if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) {
290 		printk(KERN_ERR
291 		       "dma_interrupt_handler: devType :%d DMA error (%s)\n",
292 		       channel->devType, devAttr->name);
293 	} else {
294 		devAttr->numTransfers++;
295 		devAttr->transferBytes += devAttr->numBytes;
296 	}
297 
298 	/* Call any installed handler */
299 
300 	if (devAttr->devHandler != NULL) {
301 		devAttr->devHandler(channel->devType, irqStatus,
302 				    devAttr->userData);
303 	}
304 
305 	return IRQ_HANDLED;
306 }
307 
308 /****************************************************************************/
309 /**
310 *   Allocates memory to hold a descriptor ring. The descriptor ring then
311 *   needs to be populated by making one or more calls to
*   dma_add_descriptors.
313 *
314 *   The returned descriptor ring will be automatically initialized.
315 *
316 *   @return
317 *       0           Descriptor ring was allocated successfully
318 *       -EINVAL     Invalid parameters passed in
319 *       -ENOMEM     Unable to allocate memory for the desired number of descriptors.
320 */
321 /****************************************************************************/
322 
int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring,	/* Descriptor ring to populate */
			      int numDescriptors	/* Number of descriptors that need to be allocated. */
    ) {
326 	size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors);
327 
328 	if ((ring == NULL) || (numDescriptors <= 0)) {
329 		return -EINVAL;
330 	}
331 
332 	ring->physAddr = 0;
333 	ring->descriptorsAllocated = 0;
334 	ring->bytesAllocated = 0;
335 
336 	ring->virtAddr = dma_alloc_writecombine(NULL,
337 						     bytesToAlloc,
338 						     &ring->physAddr,
339 						     GFP_KERNEL);
340 	if (ring->virtAddr == NULL) {
341 		return -ENOMEM;
342 	}
343 
344 	ring->bytesAllocated = bytesToAlloc;
345 	ring->descriptorsAllocated = numDescriptors;
346 
347 	return dma_init_descriptor_ring(ring, numDescriptors);
348 }
349 
350 EXPORT_SYMBOL(dma_alloc_descriptor_ring);
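
/*
* Illustrative usage sketch (not part of the driver): allocating and later
* releasing a ring large enough for "numDesc" descriptors. The ring comes
* back already initialized, so it can be populated with dma_add_descriptors
* right away.
*
*       DMA_DescriptorRing_t ring;
*
*       if (dma_alloc_descriptor_ring(&ring, numDesc) < 0) {
*           return -ENOMEM;
*       }
*       ... populate with dma_add_descriptors and use the ring ...
*       dma_free_descriptor_ring(&ring);
*/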
351 
352 /****************************************************************************/
353 /**
354 *   Releases the memory which was previously allocated for a descriptor ring.
355 */
356 /****************************************************************************/
357 
void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring	/* Descriptor to release */
    ) {
360 	if (ring->virtAddr != NULL) {
361 		dma_free_writecombine(NULL,
362 				      ring->bytesAllocated,
363 				      ring->virtAddr, ring->physAddr);
364 	}
365 
366 	ring->bytesAllocated = 0;
367 	ring->descriptorsAllocated = 0;
368 	ring->virtAddr = NULL;
369 	ring->physAddr = 0;
370 }
371 
372 EXPORT_SYMBOL(dma_free_descriptor_ring);
373 
374 /****************************************************************************/
375 /**
376 *   Initializes a descriptor ring, so that descriptors can be added to it.
377 *   Once a descriptor ring has been allocated, it may be reinitialized for
378 *   use with additional/different regions of memory.
379 *
*   Note that if, for example, 7 descriptors are allocated, it's perfectly
*   acceptable to initialize the ring with a smaller number of descriptors.
*   The amount of memory allocated for the descriptor ring will not be
*   reduced, and the descriptor ring may be reinitialized later.
384 *
385 *   @return
*       0           Descriptor ring was initialized successfully
*       -EINVAL     The descriptor ring passed in has not been allocated
*       -ENOMEM     The descriptor ring which was passed in has insufficient
*                   space to hold the desired number of descriptors.
389 */
390 /****************************************************************************/
391 
int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring,	/* Descriptor ring to initialize */
			     int numDescriptors	/* Number of descriptors to initialize. */
    ) {
395 	if (ring->virtAddr == NULL) {
396 		return -EINVAL;
397 	}
398 	if (dmacHw_initDescriptor(ring->virtAddr,
399 				  ring->physAddr,
400 				  ring->bytesAllocated, numDescriptors) < 0) {
401 		printk(KERN_ERR
402 		       "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n");
403 		return -ENOMEM;
404 	}
405 
406 	return 0;
407 }
408 
409 EXPORT_SYMBOL(dma_init_descriptor_ring);
410 
411 /****************************************************************************/
412 /**
413 *   Determines the number of descriptors which would be required for a
414 *   transfer of the indicated memory region.
415 *
416 *   This function also needs to know which DMA device this transfer will
417 *   be destined for, so that the appropriate DMA configuration can be retrieved.
*   DMA parameters such as the transfer width, and whether this is a
*   memory-to-memory or memory-to-peripheral transfer, etc., can all affect
*   the actual number of descriptors required.
421 *
422 *   @return
423 *       > 0     Returns the number of descriptors required for the indicated transfer
424 *       -ENODEV - Device handed in is invalid.
425 *       -EINVAL Invalid parameters
426 *       -ENOMEM Memory exhausted
427 */
428 /****************************************************************************/
429 
int dma_calculate_descriptor_count(DMA_Device_t device,	/* DMA Device that this will be associated with */
				   dma_addr_t srcData,	/* Place to get data to write to device */
				   dma_addr_t dstData,	/* Pointer to device data address */
				   size_t numBytes	/* Number of bytes to transfer to the device */
    ) {
435 	int numDescriptors;
436 	DMA_DeviceAttribute_t *devAttr;
437 
438 	if (!IsDeviceValid(device)) {
439 		return -ENODEV;
440 	}
441 	devAttr = &DMA_gDeviceAttribute[device];
442 
443 	numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
444 							      (void *)srcData,
445 							      (void *)dstData,
446 							      numBytes);
447 	if (numDescriptors < 0) {
448 		printk(KERN_ERR
449 		       "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n");
450 		return -EINVAL;
451 	}
452 
453 	return numDescriptors;
454 }
455 
456 EXPORT_SYMBOL(dma_calculate_descriptor_count);
457 
458 /****************************************************************************/
459 /**
460 *   Adds a region of memory to the descriptor ring. Note that it may take
*   multiple descriptors for each region of memory. It is the caller's
*   responsibility to allocate a sufficiently large descriptor ring.
463 *
464 *   @return
465 *       0       Descriptors were added successfully
466 *       -ENODEV Device handed in is invalid.
467 *       -EINVAL Invalid parameters
468 *       -ENOMEM Memory exhausted
469 */
470 /****************************************************************************/
471 
int dma_add_descriptors(DMA_DescriptorRing_t *ring,	/* Descriptor ring to add descriptors to */
			DMA_Device_t device,	/* DMA Device that descriptors are for */
			dma_addr_t srcData,	/* Place to get data (memory or device) */
			dma_addr_t dstData,	/* Place to put data (memory or device) */
			size_t numBytes	/* Number of bytes to transfer to the device */
    ) {
478 	int rc;
479 	DMA_DeviceAttribute_t *devAttr;
480 
481 	if (!IsDeviceValid(device)) {
482 		return -ENODEV;
483 	}
484 	devAttr = &DMA_gDeviceAttribute[device];
485 
486 	rc = dmacHw_setDataDescriptor(&devAttr->config,
487 				      ring->virtAddr,
488 				      (void *)srcData,
489 				      (void *)dstData, numBytes);
490 	if (rc < 0) {
491 		printk(KERN_ERR
492 		       "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n",
493 		       rc);
494 		return -ENOMEM;
495 	}
496 
497 	return 0;
498 }
499 
500 EXPORT_SYMBOL(dma_add_descriptors);
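
/*
* Illustrative sketch (not part of the driver): sizing a ring for a single
* memory region and then adding the descriptors for it. DMA_DEVICE_EXAMPLE
* is a placeholder for a real DMA_Device_t entry, and srcPhys/dstPhys are
* bus addresses obtained from the usual DMA mapping APIs.
*
*       int numDesc;
*       int rc;
*       DMA_DescriptorRing_t ring;
*
*       numDesc = dma_calculate_descriptor_count(DMA_DEVICE_EXAMPLE,
*                                                srcPhys, dstPhys, numBytes);
*       if (numDesc < 0)
*           return numDesc;
*       rc = dma_alloc_descriptor_ring(&ring, numDesc);
*       if (rc == 0)
*           rc = dma_add_descriptors(&ring, DMA_DEVICE_EXAMPLE,
*                                    srcPhys, dstPhys, numBytes);
*/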
501 
502 /****************************************************************************/
503 /**
504 *   Sets the descriptor ring associated with a device.
505 *
506 *   Once set, the descriptor ring will be associated with the device, even
507 *   across channel request/free calls. Passing in a NULL descriptor ring
508 *   will release any descriptor ring currently associated with the device.
509 *
510 *   Note: If you call dma_transfer, or one of the other dma_alloc_ functions
511 *         the descriptor ring may be released and reallocated.
512 *
513 *   Note: This function will release the descriptor memory for any current
514 *         descriptor ring associated with this device.
515 *
516 *   @return
*       0       Descriptor ring was set successfully
518 *       -ENODEV Device handed in is invalid.
519 */
520 /****************************************************************************/
521 
int dma_set_device_descriptor_ring(DMA_Device_t device,	/* Device to update the descriptor ring for. */
				   DMA_DescriptorRing_t *ring	/* Descriptor ring to add descriptors to */
    ) {
525 	DMA_DeviceAttribute_t *devAttr;
526 
527 	if (!IsDeviceValid(device)) {
528 		return -ENODEV;
529 	}
530 	devAttr = &DMA_gDeviceAttribute[device];
531 
532 	/* Free the previously allocated descriptor ring */
533 
534 	dma_free_descriptor_ring(&devAttr->ring);
535 
536 	if (ring != NULL) {
537 		/* Copy in the new one */
538 
539 		devAttr->ring = *ring;
540 	}
541 
542 	/* Set things up so that if dma_transfer is called then this descriptor */
543 	/* ring will get freed. */
544 
545 	devAttr->prevSrcData = 0;
546 	devAttr->prevDstData = 0;
547 	devAttr->prevNumBytes = 0;
548 
549 	return 0;
550 }
551 
552 EXPORT_SYMBOL(dma_set_device_descriptor_ring);
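
/*
* Illustrative sketch (not part of the driver): handing a pre-built ring to
* a device so that it survives channel request/free cycles, and releasing
* it again by passing NULL. DMA_DEVICE_EXAMPLE is a placeholder device.
*
*       rc = dma_set_device_descriptor_ring(DMA_DEVICE_EXAMPLE, &ring);
*       ... perform transfers with dma_start_transfer ...
*       rc = dma_set_device_descriptor_ring(DMA_DEVICE_EXAMPLE, NULL);
*/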
553 
554 /****************************************************************************/
555 /**
556 *   Retrieves the descriptor ring associated with a device.
557 *
558 *   @return
*       0       Descriptor ring was retrieved successfully
560 *       -ENODEV Device handed in is invalid.
561 */
562 /****************************************************************************/
563 
int dma_get_device_descriptor_ring(DMA_Device_t device,	/* Device to retrieve the descriptor ring for. */
				   DMA_DescriptorRing_t *ring	/* Place to store retrieved ring */
    ) {
567 	DMA_DeviceAttribute_t *devAttr;
568 
569 	memset(ring, 0, sizeof(*ring));
570 
571 	if (!IsDeviceValid(device)) {
572 		return -ENODEV;
573 	}
574 	devAttr = &DMA_gDeviceAttribute[device];
575 
576 	*ring = devAttr->ring;
577 
578 	return 0;
579 }
580 
581 EXPORT_SYMBOL(dma_get_device_descriptor_ring);
582 
583 /****************************************************************************/
584 /**
585 *   Configures a DMA channel.
586 *
587 *   @return
588 *       >= 0    - Initialization was successful.
589 *
590 *       -EBUSY  - Device is currently being used.
591 *       -ENODEV - Device handed in is invalid.
592 */
593 /****************************************************************************/
594 
static int ConfigChannel(DMA_Handle_t handle)
596 {
597 	DMA_Channel_t *channel;
598 	DMA_DeviceAttribute_t *devAttr;
599 	int controllerIdx;
600 
601 	channel = HandleToChannel(handle);
602 	if (channel == NULL) {
603 		return -ENODEV;
604 	}
605 	devAttr = &DMA_gDeviceAttribute[channel->devType];
606 	controllerIdx = CONTROLLER_FROM_HANDLE(handle);
607 
608 	if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) {
609 		if (devAttr->config.transferType ==
610 		    dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) {
611 			devAttr->config.dstPeripheralPort =
612 			    devAttr->dmacPort[controllerIdx];
613 		} else if (devAttr->config.transferType ==
614 			   dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) {
615 			devAttr->config.srcPeripheralPort =
616 			    devAttr->dmacPort[controllerIdx];
617 		}
618 	}
619 
620 	if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) {
621 		printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n");
622 		return -EIO;
623 	}
624 
625 	return 0;
626 }
627 
628 /****************************************************************************/
629 /**
630 *   Initializes all of the data structures associated with the DMA.
631 *   @return
632 *       >= 0    - Initialization was successful.
633 *
634 *       -EBUSY  - Device is currently being used.
635 *       -ENODEV - Device handed in is invalid.
636 */
637 /****************************************************************************/
638 
int dma_init(void)
640 {
641 	int rc = 0;
642 	int controllerIdx;
643 	int channelIdx;
644 	DMA_Device_t devIdx;
645 	DMA_Channel_t *channel;
646 	DMA_Handle_t dedicatedHandle;
647 
648 	memset(&gDMA, 0, sizeof(gDMA));
649 
650 	sema_init(&gDMA.lock, 0);
651 	init_waitqueue_head(&gDMA.freeChannelQ);
652 
653 	/* Initialize the Hardware */
654 
655 	dmacHw_initDma();
656 
657 	/* Start off by marking all of the DMA channels as shared. */
658 
659 	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
660 	     controllerIdx++) {
661 		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
662 		     channelIdx++) {
663 			channel =
664 			    &gDMA.controller[controllerIdx].channel[channelIdx];
665 
666 			channel->flags = 0;
667 			channel->devType = DMA_DEVICE_NONE;
668 			channel->lastDevType = DMA_DEVICE_NONE;
669 
670 #if (DMA_DEBUG_TRACK_RESERVATION)
671 			channel->fileName = "";
672 			channel->lineNum = 0;
673 #endif
674 
675 			channel->dmacHwHandle =
676 			    dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID
677 						    (controllerIdx,
678 						     channelIdx));
679 			dmacHw_initChannel(channel->dmacHwHandle);
680 		}
681 	}
682 
683 	/* Record any special attributes that channels may have */
684 
685 	gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
686 	gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
687 	gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
688 	gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
689 
690 	/* Now walk through and record the dedicated channels. */
691 
692 	for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
693 		DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];
694 
695 		if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0)
696 		    && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) {
697 			printk(KERN_ERR
698 			       "DMA Device: %s Can only request NO_ISR for dedicated devices\n",
699 			       devAttr->name);
700 			rc = -EINVAL;
701 			goto out;
702 		}
703 
704 		if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
705 			/* This is a dedicated device. Mark the channel as being reserved. */
706 
707 			if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) {
708 				printk(KERN_ERR
709 				       "DMA Device: %s DMA Controller %d is out of range\n",
710 				       devAttr->name,
711 				       devAttr->dedicatedController);
712 				rc = -EINVAL;
713 				goto out;
714 			}
715 
716 			if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) {
717 				printk(KERN_ERR
718 				       "DMA Device: %s DMA Channel %d is out of range\n",
719 				       devAttr->name,
720 				       devAttr->dedicatedChannel);
721 				rc = -EINVAL;
722 				goto out;
723 			}
724 
725 			dedicatedHandle =
726 			    MAKE_HANDLE(devAttr->dedicatedController,
727 					devAttr->dedicatedChannel);
728 			channel = HandleToChannel(dedicatedHandle);
729 
730 			if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
731 			    0) {
732 				printk
733 				    ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n",
734 				     devAttr->name,
735 				     devAttr->dedicatedController,
736 				     devAttr->dedicatedChannel,
737 				     DMA_gDeviceAttribute[channel->devType].
738 				     name);
739 				rc = -EBUSY;
740 				goto out;
741 			}
742 
743 			channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED;
744 			channel->devType = devIdx;
745 
746 			if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) {
747 				channel->flags |= DMA_CHANNEL_FLAG_NO_ISR;
748 			}
749 
750 			/* For dedicated channels, we can go ahead and configure the DMA channel now */
751 			/* as well. */
752 
753 			ConfigChannel(dedicatedHandle);
754 		}
755 	}
756 
757 	/* Go through and register the interrupt handlers */
758 
759 	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
760 	     controllerIdx++) {
761 		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
762 		     channelIdx++) {
763 			channel =
764 			    &gDMA.controller[controllerIdx].channel[channelIdx];
765 
766 			if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) {
767 				snprintf(channel->name, sizeof(channel->name),
768 					 "dma %d:%d %s", controllerIdx,
769 					 channelIdx,
770 					 channel->devType ==
771 					 DMA_DEVICE_NONE ? "" :
772 					 DMA_gDeviceAttribute[channel->devType].
773 					 name);
774 
775 				rc =
776 				     request_irq(IRQ_DMA0C0 +
777 						 (controllerIdx *
778 						  DMA_NUM_CHANNELS) +
779 						 channelIdx,
780 						 dma_interrupt_handler,
781 						 IRQF_DISABLED, channel->name,
782 						 channel);
783 				if (rc != 0) {
784 					printk(KERN_ERR
785 					       "request_irq for IRQ_DMA%dC%d failed\n",
786 					       controllerIdx, channelIdx);
787 				}
788 			}
789 		}
790 	}
791 
792 	/* Create /proc/dma/channels and /proc/dma/devices */
793 
794 	gDmaDir = proc_mkdir("dma", NULL);
795 
796 	if (gDmaDir == NULL) {
797 		printk(KERN_ERR "Unable to create /proc/dma\n");
798 	} else {
799 		create_proc_read_entry("channels", 0, gDmaDir,
800 				       dma_proc_read_channels, NULL);
801 		create_proc_read_entry("devices", 0, gDmaDir,
802 				       dma_proc_read_devices, NULL);
803 	}
804 
805 out:
806 
807 	up(&gDMA.lock);
808 
809 	return rc;
810 }
811 
812 /****************************************************************************/
813 /**
814 *   Reserves a channel for use with @a dev. If the device is setup to use
815 *   a shared channel, then this function will block until a free channel
816 *   becomes available.
817 *
818 *   @return
819 *       >= 0    - A valid DMA Handle.
820 *       -EBUSY  - Device is currently being used.
821 *       -ENODEV - Device handed in is invalid.
822 */
823 /****************************************************************************/
824 
825 #if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg
    (DMA_Device_t dev, const char *fileName, int lineNum)
828 #else
829 DMA_Handle_t dma_request_channel(DMA_Device_t dev)
830 #endif
831 {
832 	DMA_Handle_t handle;
833 	DMA_DeviceAttribute_t *devAttr;
834 	DMA_Channel_t *channel;
835 	int controllerIdx;
836 	int controllerIdx2;
837 	int channelIdx;
838 
839 	if (down_interruptible(&gDMA.lock) < 0) {
840 		return -ERESTARTSYS;
841 	}
842 
843 	if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
844 		handle = -ENODEV;
845 		goto out;
846 	}
847 	devAttr = &DMA_gDeviceAttribute[dev];
848 
849 #if (DMA_DEBUG_TRACK_RESERVATION)
850 	{
851 		char *s;
852 
853 		s = strrchr(fileName, '/');
854 		if (s != NULL) {
855 			fileName = s + 1;
856 		}
857 	}
858 #endif
859 	if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
860 		/* This device has already been requested and not been freed */
861 
862 		printk(KERN_ERR "%s: device %s is already requested\n",
863 		       __func__, devAttr->name);
864 		handle = -EBUSY;
865 		goto out;
866 	}
867 
868 	if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
869 		/* This device has a dedicated channel. */
870 
871 		channel =
872 		    &gDMA.controller[devAttr->dedicatedController].
873 		    channel[devAttr->dedicatedChannel];
874 		if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
875 			handle = -EBUSY;
876 			goto out;
877 		}
878 
879 		channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
880 		devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;
881 
882 #if (DMA_DEBUG_TRACK_RESERVATION)
883 		channel->fileName = fileName;
884 		channel->lineNum = lineNum;
885 #endif
886 		handle =
887 		    MAKE_HANDLE(devAttr->dedicatedController,
888 				devAttr->dedicatedChannel);
889 		goto out;
890 	}
891 
892 	/* This device needs to use one of the shared channels. */
893 
894 	handle = DMA_INVALID_HANDLE;
895 	while (handle == DMA_INVALID_HANDLE) {
896 		/* Scan through the shared channels and see if one is available */
897 
898 		for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
899 		     controllerIdx2++) {
900 			/* Check to see if we should try on controller 1 first. */
901 
902 			controllerIdx = controllerIdx2;
903 			if ((devAttr->
904 			     flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
905 				controllerIdx = 1 - controllerIdx;
906 			}
907 
908 			/* See if the device is available on the controller being tested */
909 
910 			if ((devAttr->
911 			     flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
912 			    != 0) {
913 				for (channelIdx = 0;
914 				     channelIdx < DMA_NUM_CHANNELS;
915 				     channelIdx++) {
916 					channel =
917 					    &gDMA.controller[controllerIdx].
918 					    channel[channelIdx];
919 
920 					if (((channel->
921 					      flags &
922 					      DMA_CHANNEL_FLAG_IS_DEDICATED) ==
923 					     0)
924 					    &&
925 					    ((channel->
926 					      flags & DMA_CHANNEL_FLAG_IN_USE)
927 					     == 0)) {
928 						if (((channel->
929 						      flags &
930 						      DMA_CHANNEL_FLAG_LARGE_FIFO)
931 						     != 0)
932 						    &&
933 						    ((devAttr->
934 						      flags &
935 						      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
936 						     == 0)) {
937 							/* This channel is a large fifo - don't tie it up */
938 							/* with devices that we don't want using it. */
939 
940 							continue;
941 						}
942 
943 						channel->flags |=
944 						    DMA_CHANNEL_FLAG_IN_USE;
945 						channel->devType = dev;
946 						devAttr->flags |=
947 						    DMA_DEVICE_FLAG_IN_USE;
948 
949 #if (DMA_DEBUG_TRACK_RESERVATION)
950 						channel->fileName = fileName;
951 						channel->lineNum = lineNum;
952 #endif
953 						handle =
954 						    MAKE_HANDLE(controllerIdx,
955 								channelIdx);
956 
957 						/* Now that we've reserved the channel - we can go ahead and configure it */
958 
959 						if (ConfigChannel(handle) != 0) {
960 							handle = -EIO;
961 							printk(KERN_ERR
962 							       "dma_request_channel: ConfigChannel failed\n");
963 						}
964 						goto out;
965 					}
966 				}
967 			}
968 		}
969 
970 		/* No channels are currently available. Let's wait for one to free up. */
971 
972 		{
973 			DEFINE_WAIT(wait);
974 
975 			prepare_to_wait(&gDMA.freeChannelQ, &wait,
976 					TASK_INTERRUPTIBLE);
977 			up(&gDMA.lock);
978 			schedule();
979 			finish_wait(&gDMA.freeChannelQ, &wait);
980 
981 			if (signal_pending(current)) {
982 				/* We don't currently hold gDMA.lock, so we return directly */
983 
984 				return -ERESTARTSYS;
985 			}
986 		}
987 
988 		if (down_interruptible(&gDMA.lock)) {
989 			return -ERESTARTSYS;
990 		}
991 	}
992 
993 out:
994 	up(&gDMA.lock);
995 
996 	return handle;
997 }
998 
999 /* Create both _dbg and non _dbg functions for modules. */
1000 
1001 #if (DMA_DEBUG_TRACK_RESERVATION)
1002 #undef dma_request_channel
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
1004 {
1005 	return dma_request_channel_dbg(dev, __FILE__, __LINE__);
1006 }
1007 
1008 EXPORT_SYMBOL(dma_request_channel_dbg);
1009 #endif
1010 EXPORT_SYMBOL(dma_request_channel);
1011 
1012 /****************************************************************************/
1013 /**
1014 *   Frees a previously allocated DMA Handle.
1015 */
1016 /****************************************************************************/
1017 
int dma_free_channel(DMA_Handle_t handle	/* DMA handle. */
    ) {
1020 	int rc = 0;
1021 	DMA_Channel_t *channel;
1022 	DMA_DeviceAttribute_t *devAttr;
1023 
1024 	if (down_interruptible(&gDMA.lock) < 0) {
1025 		return -ERESTARTSYS;
1026 	}
1027 
1028 	channel = HandleToChannel(handle);
1029 	if (channel == NULL) {
1030 		rc = -EINVAL;
1031 		goto out;
1032 	}
1033 
1034 	devAttr = &DMA_gDeviceAttribute[channel->devType];
1035 
1036 	if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) {
1037 		channel->lastDevType = channel->devType;
1038 		channel->devType = DMA_DEVICE_NONE;
1039 	}
1040 	channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE;
1041 	devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE;
1042 
1043 out:
1044 	up(&gDMA.lock);
1045 
1046 	wake_up_interruptible(&gDMA.freeChannelQ);
1047 
1048 	return rc;
1049 }
1050 
1051 EXPORT_SYMBOL(dma_free_channel);
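
/*
* Illustrative sketch (not part of the driver): reserving a channel for a
* device, using it, and releasing it again. For shared devices the request
* may block until a channel frees up, so it must not be made from atomic
* context. DMA_DEVICE_EXAMPLE is a placeholder device.
*
*       DMA_Handle_t handle;
*
*       handle = dma_request_channel(DMA_DEVICE_EXAMPLE);
*       if (handle < 0) {
*           return handle;
*       }
*       ... perform transfers on 'handle' ...
*       dma_free_channel(handle);
*/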
1052 
1053 /****************************************************************************/
1054 /**
1055 *   Determines if a given device has been configured as using a shared
1056 *   channel.
1057 *
1058 *   @return
1059 *       0           Device uses a dedicated channel
*       > 0         Device uses a shared channel
*       < 0         Error code
1062 */
1063 /****************************************************************************/
1064 
int dma_device_is_channel_shared(DMA_Device_t device	/* Device to check. */
    ) {
1067 	DMA_DeviceAttribute_t *devAttr;
1068 
1069 	if (!IsDeviceValid(device)) {
1070 		return -ENODEV;
1071 	}
1072 	devAttr = &DMA_gDeviceAttribute[device];
1073 
1074 	return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0);
1075 }
1076 
1077 EXPORT_SYMBOL(dma_device_is_channel_shared);
1078 
1079 /****************************************************************************/
1080 /**
1081 *   Allocates buffers for the descriptors. This is normally done automatically
1082 *   but needs to be done explicitly when initiating a dma from interrupt
1083 *   context.
1084 *
1085 *   @return
1086 *       0       Descriptors were allocated successfully
1087 *       -EINVAL Invalid device type for this kind of transfer
1088 *               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
1089 *       -ENOMEM Memory exhausted
1090 */
1091 /****************************************************************************/
1092 
int dma_alloc_descriptors(DMA_Handle_t handle,	/* DMA Handle */
			  dmacHw_TRANSFER_TYPE_e transferType,	/* Type of transfer being performed */
			  dma_addr_t srcData,	/* Place to get data to write to device */
			  dma_addr_t dstData,	/* Pointer to device data address */
			  size_t numBytes	/* Number of bytes to transfer to the device */
    ) {
1099 	DMA_Channel_t *channel;
1100 	DMA_DeviceAttribute_t *devAttr;
1101 	int numDescriptors;
1102 	size_t ringBytesRequired;
1103 	int rc = 0;
1104 
1105 	channel = HandleToChannel(handle);
1106 	if (channel == NULL) {
1107 		return -ENODEV;
1108 	}
1109 
1110 	devAttr = &DMA_gDeviceAttribute[channel->devType];
1111 
1112 	if (devAttr->config.transferType != transferType) {
1113 		return -EINVAL;
1114 	}
1115 
1116 	/* Figure out how many descriptors we need. */
1117 
1118 	/* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
1119 	/*        srcData, dstData, numBytes); */
1120 
1121 	numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
1122 							      (void *)srcData,
1123 							      (void *)dstData,
1124 							      numBytes);
1125 	if (numDescriptors < 0) {
1126 		printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n",
1127 		       __func__);
1128 		return -EINVAL;
1129 	}
1130 
1131 	/* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
1132 	/* a new one. */
1133 
1134 	ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
1135 
1136 	/* printk("ringBytesRequired: %d\n", ringBytesRequired); */
1137 
1138 	if (ringBytesRequired > devAttr->ring.bytesAllocated) {
1139 		/* Make sure that this code path is never taken from interrupt context. */
1140 		/* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
1141 		/* allocation needs to have already been done. */
1142 
1143 		might_sleep();
1144 
1145 		/* Free the old descriptor ring and allocate a new one. */
1146 
1147 		dma_free_descriptor_ring(&devAttr->ring);
1148 
1149 		/* And allocate a new one. */
1150 
1151 		rc =
1152 		     dma_alloc_descriptor_ring(&devAttr->ring,
1153 					       numDescriptors);
1154 		if (rc < 0) {
1155 			printk(KERN_ERR
1156 			       "%s: dma_alloc_descriptor_ring(%d) failed\n",
1157 			       __func__, numDescriptors);
1158 			return rc;
1159 		}
1160 		/* Setup the descriptor for this transfer */
1161 
1162 		if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
1163 					  devAttr->ring.physAddr,
1164 					  devAttr->ring.bytesAllocated,
1165 					  numDescriptors) < 0) {
1166 			printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n",
1167 			       __func__);
1168 			return -EINVAL;
1169 		}
1170 	} else {
1171 		/* We've already got enough ring buffer allocated. All we need to do is reset */
1172 		/* any control information, just in case the previous DMA was stopped. */
1173 
1174 		dmacHw_resetDescriptorControl(devAttr->ring.virtAddr);
1175 	}
1176 
1177 	/* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
1178 	/* as last time, then we don't need to call setDataDescriptor again. */
1179 
1180 	if (dmacHw_setDataDescriptor(&devAttr->config,
1181 				     devAttr->ring.virtAddr,
1182 				     (void *)srcData,
1183 				     (void *)dstData, numBytes) < 0) {
1184 		printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n",
1185 		       __func__);
1186 		return -EINVAL;
1187 	}
1188 
1189 	/* Remember the critical information for this transfer so that we can eliminate */
1190 	/* another call to dma_alloc_descriptors if the caller reuses the same buffers */
1191 
1192 	devAttr->prevSrcData = srcData;
1193 	devAttr->prevDstData = dstData;
1194 	devAttr->prevNumBytes = numBytes;
1195 
1196 	return 0;
1197 }
1198 
1199 EXPORT_SYMBOL(dma_alloc_descriptors);
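
/*
* Illustrative sketch (not part of the driver): pre-allocating descriptors
* from process context so that a later transfer can be kicked off from
* interrupt context with dma_start_transfer, which performs no allocation.
* The transfer type must match the device's configuration; the peripheral
* to memory type is used here purely as an example.
*
*       rc = dma_alloc_descriptors(handle,
*                                  dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM,
*                                  srcPhys, dstPhys, numBytes);
*       if (rc == 0) {
*           ... later, possibly from an ISR ...
*           dma_start_transfer(handle);
*       }
*/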
1200 
1201 /****************************************************************************/
1202 /**
1203 *   Allocates and sets up descriptors for a double buffered circular buffer.
1204 *
1205 *   This is primarily intended to be used for things like the ingress samples
1206 *   from a microphone.
1207 *
1208 *   @return
1209 *       > 0     Number of descriptors actually allocated.
1210 *       -EINVAL Invalid device type for this kind of transfer
1211 *               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
1212 *       -ENOMEM Memory exhausted
1213 */
1214 /****************************************************************************/
1215 
int dma_alloc_double_dst_descriptors(DMA_Handle_t handle,	/* DMA Handle */
				     dma_addr_t srcData,	/* Physical address of source data */
				     dma_addr_t dstData1,	/* Physical address of first destination buffer */
				     dma_addr_t dstData2,	/* Physical address of second destination buffer */
				     size_t numBytes	/* Number of bytes in each destination buffer */
    ) {
1222 	DMA_Channel_t *channel;
1223 	DMA_DeviceAttribute_t *devAttr;
1224 	int numDst1Descriptors;
1225 	int numDst2Descriptors;
1226 	int numDescriptors;
1227 	size_t ringBytesRequired;
1228 	int rc = 0;
1229 
1230 	channel = HandleToChannel(handle);
1231 	if (channel == NULL) {
1232 		return -ENODEV;
1233 	}
1234 
1235 	devAttr = &DMA_gDeviceAttribute[channel->devType];
1236 
1237 	/* Figure out how many descriptors we need. */
1238 
1239 	/* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
1240 	/*        srcData, dstData, numBytes); */
1241 
1242 	numDst1Descriptors =
1243 	     dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1244 					     (void *)dstData1, numBytes);
1245 	if (numDst1Descriptors < 0) {
1246 		return -EINVAL;
1247 	}
1248 	numDst2Descriptors =
1249 	     dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1250 					     (void *)dstData2, numBytes);
1251 	if (numDst2Descriptors < 0) {
1252 		return -EINVAL;
1253 	}
1254 	numDescriptors = numDst1Descriptors + numDst2Descriptors;
1255 	/* printk("numDescriptors: %d\n", numDescriptors); */
1256 
1257 	/* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
1258 	/* a new one. */
1259 
1260 	ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
1261 
1262 	/* printk("ringBytesRequired: %d\n", ringBytesRequired); */
1263 
1264 	if (ringBytesRequired > devAttr->ring.bytesAllocated) {
1265 		/* Make sure that this code path is never taken from interrupt context. */
1266 		/* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
1267 		/* allocation needs to have already been done. */
1268 
1269 		might_sleep();
1270 
1271 		/* Free the old descriptor ring and allocate a new one. */
1272 
1273 		dma_free_descriptor_ring(&devAttr->ring);
1274 
1275 		/* And allocate a new one. */
1276 
1277 		rc =
1278 		     dma_alloc_descriptor_ring(&devAttr->ring,
1279 					       numDescriptors);
1280 		if (rc < 0) {
1281 			printk(KERN_ERR
1282 			       "%s: dma_alloc_descriptor_ring(%d) failed\n",
			       __func__, numDescriptors);
1284 			return rc;
1285 		}
1286 	}
1287 
1288 	/* Setup the descriptor for this transfer. Since this function is used with */
1289 	/* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */
1290 	/* setDataDescriptor will keep trying to append onto the end. */
1291 
1292 	if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
1293 				  devAttr->ring.physAddr,
1294 				  devAttr->ring.bytesAllocated,
1295 				  numDescriptors) < 0) {
1296 		printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__);
1297 		return -EINVAL;
1298 	}
1299 
1300 	/* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
1301 	/* as last time, then we don't need to call setDataDescriptor again. */
1302 
1303 	if (dmacHw_setDataDescriptor(&devAttr->config,
1304 				     devAttr->ring.virtAddr,
1305 				     (void *)srcData,
1306 				     (void *)dstData1, numBytes) < 0) {
1307 		printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n",
1308 		       __func__);
1309 		return -EINVAL;
1310 	}
1311 	if (dmacHw_setDataDescriptor(&devAttr->config,
1312 				     devAttr->ring.virtAddr,
1313 				     (void *)srcData,
1314 				     (void *)dstData2, numBytes) < 0) {
1315 		printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n",
1316 		       __func__);
1317 		return -EINVAL;
1318 	}
1319 
	/* Callers should use dma_start_transfer rather than the dma_transfer_xxx */
	/* functions with this ring, so we deliberately don't make the 'prev' */
	/* variables match this setup. */
1322 
1323 	devAttr->prevSrcData = 0;
1324 	devAttr->prevDstData = 0;
1325 	devAttr->prevNumBytes = 0;
1326 
1327 	return numDescriptors;
1328 }
1329 
1330 EXPORT_SYMBOL(dma_alloc_double_dst_descriptors);
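
/*
* Illustrative sketch (not part of the driver): setting up a double-buffered
* capture, e.g. microphone ingress, where the controller alternates between
* two destination buffers. dstPhys1/dstPhys2 are the two halves and srcPhys
* is the peripheral data register address (all placeholders).
*
*       rc = dma_alloc_double_dst_descriptors(handle, srcPhys,
*                                             dstPhys1, dstPhys2, numBytes);
*       if (rc < 0) {
*           return rc;
*       }
*       rc = dma_start_transfer(handle);
*       ... the completion callback drains each buffer as it fills ...
*       dma_stop_transfer(handle);
*/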
1331 
1332 /****************************************************************************/
1333 /**
1334 *   Initiates a transfer when the descriptors have already been setup.
1335 *
1336 *   This is a special case, and normally, the dma_transfer_xxx functions should
1337 *   be used.
1338 *
1339 *   @return
1340 *       0       Transfer was started successfully
1341 *       -ENODEV Invalid handle
1342 */
1343 /****************************************************************************/
1344 
int dma_start_transfer(DMA_Handle_t handle)
1346 {
1347 	DMA_Channel_t *channel;
1348 	DMA_DeviceAttribute_t *devAttr;
1349 
1350 	channel = HandleToChannel(handle);
1351 	if (channel == NULL) {
1352 		return -ENODEV;
1353 	}
1354 	devAttr = &DMA_gDeviceAttribute[channel->devType];
1355 
1356 	dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1357 				devAttr->ring.virtAddr);
1358 
1359 	/* Since we got this far, everything went successfully */
1360 
1361 	return 0;
1362 }
1363 
1364 EXPORT_SYMBOL(dma_start_transfer);
1365 
1366 /****************************************************************************/
1367 /**
1368 *   Stops a previously started DMA transfer.
1369 *
1370 *   @return
1371 *       0       Transfer was stopped successfully
1372 *       -ENODEV Invalid handle
1373 */
1374 /****************************************************************************/
1375 
int dma_stop_transfer(DMA_Handle_t handle)
1377 {
1378 	DMA_Channel_t *channel;
1379 
1380 	channel = HandleToChannel(handle);
1381 	if (channel == NULL) {
1382 		return -ENODEV;
1383 	}
1384 
1385 	dmacHw_stopTransfer(channel->dmacHwHandle);
1386 
1387 	return 0;
1388 }
1389 
1390 EXPORT_SYMBOL(dma_stop_transfer);
1391 
1392 /****************************************************************************/
1393 /**
1394 *   Waits for a DMA to complete by polling. This function is only intended
1395 *   to be used for testing. Interrupts should be used for most DMA operations.
1396 */
1397 /****************************************************************************/
1398 
int dma_wait_transfer_done(DMA_Handle_t handle)
1400 {
1401 	DMA_Channel_t *channel;
1402 	dmacHw_TRANSFER_STATUS_e status;
1403 
1404 	channel = HandleToChannel(handle);
1405 	if (channel == NULL) {
1406 		return -ENODEV;
1407 	}
1408 
1409 	while ((status =
1410 		dmacHw_transferCompleted(channel->dmacHwHandle)) ==
1411 	       dmacHw_TRANSFER_STATUS_BUSY) {
1412 		;
1413 	}
1414 
1415 	if (status == dmacHw_TRANSFER_STATUS_ERROR) {
1416 		printk(KERN_ERR "%s: DMA transfer failed\n", __func__);
1417 		return -EIO;
1418 	}
1419 	return 0;
1420 }
1421 
1422 EXPORT_SYMBOL(dma_wait_transfer_done);
1423 
1424 /****************************************************************************/
1425 /**
1426 *   Initiates a DMA, allocating the descriptors as required.
1427 *
1428 *   @return
1429 *       0       Transfer was started successfully
1430 *       -EINVAL Invalid device type for this kind of transfer
1431 *               (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV)
1432 */
1433 /****************************************************************************/
1434 
int dma_transfer(DMA_Handle_t handle,	/* DMA Handle */
		 dmacHw_TRANSFER_TYPE_e transferType,	/* Type of transfer being performed */
		 dma_addr_t srcData,	/* Place to get data to write to device */
		 dma_addr_t dstData,	/* Pointer to device data address */
		 size_t numBytes	/* Number of bytes to transfer to the device */
    ) {
1441 	DMA_Channel_t *channel;
1442 	DMA_DeviceAttribute_t *devAttr;
1443 	int rc = 0;
1444 
1445 	channel = HandleToChannel(handle);
1446 	if (channel == NULL) {
1447 		return -ENODEV;
1448 	}
1449 
1450 	devAttr = &DMA_gDeviceAttribute[channel->devType];
1451 
1452 	if (devAttr->config.transferType != transferType) {
1453 		return -EINVAL;
1454 	}
1455 
1456 	/* We keep track of the information about the previous request for this */
1457 	/* device, and if the attributes match, then we can use the descriptors we setup */
1458 	/* the last time, and not have to reinitialize everything. */
1459 
1460 	{
1461 		rc =
1462 		     dma_alloc_descriptors(handle, transferType, srcData,
1463 					   dstData, numBytes);
1464 		if (rc != 0) {
1465 			return rc;
1466 		}
1467 	}
1468 
1469 	/* And kick off the transfer */
1470 
1471 	devAttr->numBytes = numBytes;
1472 	devAttr->transferStartTime = timer_get_tick_count();
1473 
1474 	dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1475 				devAttr->ring.virtAddr);
1476 
1477 	/* Since we got this far, everything went successfully */
1478 
1479 	return 0;
1480 }
1481 
1482 EXPORT_SYMBOL(dma_transfer);
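
/*
* Illustrative sketch (not part of the driver): a simple one-shot
* memory-to-peripheral transfer, polling for completion. Polling is only
* appropriate for testing; real users should install a handler with
* dma_set_device_handler instead.
*
*       rc = dma_transfer(handle,
*                         dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
*                         srcPhys, dstPhys, numBytes);
*       if (rc == 0) {
*           rc = dma_wait_transfer_done(handle);
*       }
*/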
1483 
1484 /****************************************************************************/
1485 /**
1486 *   Set the callback function which will be called when a transfer completes.
1487 *   If a NULL callback function is set, then no callback will occur.
1488 *
1489 *   @note   @a devHandler will be called from IRQ context.
1490 *
1491 *   @return
1492 *       0       - Success
1493 *       -ENODEV - Device handed in is invalid.
1494 */
1495 /****************************************************************************/
1496 
int dma_set_device_handler(DMA_Device_t dev,	/* Device to set the callback for. */
			   DMA_DeviceHandler_t devHandler,	/* Function to call when the DMA completes */
			   void *userData	/* Pointer which will be passed to devHandler. */
    ) {
1501 	DMA_DeviceAttribute_t *devAttr;
1502 	unsigned long flags;
1503 
1504 	if (!IsDeviceValid(dev)) {
1505 		return -ENODEV;
1506 	}
1507 	devAttr = &DMA_gDeviceAttribute[dev];
1508 
1509 	local_irq_save(flags);
1510 
1511 	devAttr->userData = userData;
1512 	devAttr->devHandler = devHandler;
1513 
1514 	local_irq_restore(flags);
1515 
1516 	return 0;
1517 }
1518 
1519 EXPORT_SYMBOL(dma_set_device_handler);
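
/*
* Illustrative sketch (not part of the driver): installing a completion
* callback. The handler runs in IRQ context with the same arguments that
* dma_interrupt_handler passes above (device, interrupt status, user data);
* DMA_DEVICE_EXAMPLE and my_dma_done are placeholders.
*
*       static void my_dma_done(DMA_Device_t dev, int reason, void *userData)
*       {
*           complete((struct completion *)userData);
*       }
*
*       ...
*       rc = dma_set_device_handler(DMA_DEVICE_EXAMPLE, my_dma_done, &done);
*/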
1520