/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <asm/page.h>
/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a valid DMA request cookie; if < 0 it's an
 * error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
	DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
	DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a collection of contiguous bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
 * ICGs may or may not change between chunks.
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
 *  that when repeated an integral number of times, specifies the transfer.
 * A transfer template is a specification of a Frame, the number of times
 *  it is to be repeated and other per-transfer attributes.
 *
 * Practically, a client driver would have ready a template for each
 *  type of transfer it is going to need during its lifetime and
 *  set only 'src_start' and 'dst_start' before submitting the requests.
 *
 *
 *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
 *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
 *
 *    ==  Chunk size
 *    ... ICG
 */

/**
 * struct data_chunk - Element of scatter-gather list that makes a frame.
 * @size: Number of bytes to read from source.
 *	  size_dst := fn(op, size_src), so it doesn't mean much for the
 *	  destination.
 * @icg: Number of bytes to jump after the last src/dst address of this
 *	 chunk and before the first src/dst address of the next chunk.
 *	 Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
 *	 Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
 */
struct data_chunk {
	size_t size;
	size_t icg;
};

/**
 * struct dma_interleaved_template - Template to convey to the DMAC the
 *	 transfer pattern and attributes.
 * @src_start: Bus address of source for the first chunk.
 * @dst_start: Bus address of destination for the first chunk.
 * @dir: Specifies the type of Source and Destination.
 * @src_inc: If the source address increments after reading from it.
 * @dst_inc: If the destination address increments after writing to it.
 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
 *		Otherwise, source is read contiguously (icg ignored).
 *		Ignored if src_inc is false.
 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
 *		Otherwise, destination is filled contiguously (icg ignored).
 *		Ignored if dst_inc is false.
 * @numf: Number of frames in this template.
 * @frame_size: Number of chunks in a frame, i.e., the size of sgl[].
 * @sgl: Array of {chunk,icg} pairs that make up a frame.
 */
struct dma_interleaved_template {
	dma_addr_t src_start;
	dma_addr_t dst_start;
	enum dma_transfer_direction dir;
	bool src_inc;
	bool dst_inc;
	bool src_sgl;
	bool dst_sgl;
	size_t numf;
	size_t frame_size;
	struct data_chunk sgl[0];
};

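/*
 * Example (illustrative sketch, not part of the API): a template for eight
 * frames of two chunks each -- 64 contiguous bytes, a 192-byte gap, then 32
 * more bytes -- scattered on the source side and packed contiguously at the
 * destination.  'src_dma_addr' and 'dst_dma_addr' are hypothetical bus
 * addresses mapped by the caller; kzalloc() needs <linux/slab.h>.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + 2 * sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->src_start = src_dma_addr;
 *	xt->dst_start = dst_dma_addr;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = true;
 *	xt->dst_sgl = false;
 *	xt->numf = 8;
 *	xt->frame_size = 2;
 *	xt->sgl[0].size = 64;
 *	xt->sgl[0].icg = 192;
 *	xt->sgl[1].size = 32;
 *	xt->sgl[1].icg = 0;
 */
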
/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *	(if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *	single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};

/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
 * that need to reconfigure the slave channels at runtime (as opposed to
 * receiving configuration data statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
	FSLDMA_EXTERNAL_START,
};

/**
 * enum sum_check_bits - bit position of sum_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines the bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
 * legal values; a bidirectional setting is not acceptable since we
 * need to differentiate source and target addresses.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX); if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX); if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * command in the channel config interface, DMA_SLAVE_CONFIG,
 * and this struct will then be passed in as an argument to the
 * DMA engine device_control() function.
 *
 * The rationale for adding configuration information to this struct
 * is as follows: if it is likely that most DMA slave controllers in
 * the world will support the configuration option, then make it
 * generic. If not: if it is fixed so that it can be sent in statically
 * from the platform data, then prefer to do that. Else, if it is neither
 * fixed at runtime, nor generic enough (such as bus mastership on
 * some CPU family and whatnot) then create a custom slave config
 * struct and pass that, then make this config a member of that
 * struct, if applicable.
 */
struct dma_slave_config {
	enum dma_transfer_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
};

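/*
 * Example (minimal sketch): configuring a channel to feed a peripheral's
 * 32-bit TX FIFO, bursting up to four words at a time.  'UART_TX_FIFO' is
 * a hypothetical device bus address; dmaengine_slave_config() is defined
 * further down in this header.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = UART_TX_FIFO,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
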
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned; 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  The routine returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a memory-to-memory scatter-gather operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: prepares an interleaved dma operation from
 *	a generic transfer template
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction);
	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	return chan->device->device_control(chan, cmd, arg);
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
			(unsigned long)config);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, void *buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;
	sg_init_one(&sg, buf, len);

	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
}

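/*
 * Example (sketch): a typical slave transmit path using the helpers above
 * and dmaengine_submit()/dma_async_issue_pending() defined further down.
 * 'chan', 'buf', 'len', 'my_tx_done' and 'my_ctx' are hypothetical
 * caller-provided names.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_tx_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */
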
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_PAUSE, 0);
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_RESUME, 0);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
	return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

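/*
 * Note (worked example, not from the original source): the *_align fields
 * are shifts, not byte counts.  With copy_align == 2 the engine requires
 * 4-byte (1 << 2) alignment, so offsets 0x10 and 0x20 with length 0x100
 * pass is_dma_copy_aligned(), while offset 0x13 fails because
 * 0x13 & 0x3 != 0.
 */
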
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

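/*
 * Worked example (values hypothetical): a driver that calls
 * dma_set_maxpq(dma, 8, 0) advertises 8 PQ sources and no native
 * continuation, so:
 *
 *	dma_maxpq(dma, 0) == 8
 *	dma_maxpq(dma, DMA_PREP_CONTINUE) == 8 - 3 == 5
 *	dma_maxpq(dma, DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P) == 8 - 1 == 7
 *
 * With dma_set_maxpq(dma, 8, 1) all three calls return 8.
 */
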
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

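/*
 * Example (sketch): poll the hardware once, then recheck further cookies
 * without touching the hardware again.  'chan', 'cookie' and
 * 'other_cookie' are hypothetical caller-provided values.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	status = dma_async_memcpy_complete(chan, cookie, &last, &used);
 *	if (status == DMA_IN_PROGRESS)
 *		status = dma_async_is_complete(other_cookie, last, used);
 */
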
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
					      dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)

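/*
 * Example (sketch): grabbing a memcpy-capable channel, optionally narrowed
 * down by a filter callback.  'my_filter' and 'my_param' are hypothetical;
 * passing NULL for both accepts any channel matching the mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_param);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */
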
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */