xref: /linux/include/linux/iio/buffer-dma.h (revision 505d195b0f96fd613a51b13dde37aa5ad301eb32)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright 2013-2015 Analog Devices Inc.
4  *  Author: Lars-Peter Clausen <lars@metafoo.de>
5  */
6 
7 #ifndef __INDUSTRIALIO_DMA_BUFFER_H__
8 #define __INDUSTRIALIO_DMA_BUFFER_H__
9 
10 #include <linux/atomic.h>
11 #include <linux/list.h>
12 #include <linux/kref.h>
13 #include <linux/spinlock.h>
14 #include <linux/mutex.h>
15 #include <linux/iio/buffer_impl.h>
16 
17 struct iio_dma_buffer_queue;
18 struct iio_dma_buffer_ops;
19 struct device;
20 struct dma_buf_attachment;
21 struct dma_fence;
22 struct sg_table;
23 
24 /**
25  * enum iio_block_state - State of a struct iio_dma_buffer_block
26  * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
27  * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
28  * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
29  * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
30  */
31 enum iio_block_state {
32 	IIO_BLOCK_STATE_QUEUED,	/* On the incoming queue, not yet submitted */
33 	IIO_BLOCK_STATE_ACTIVE,	/* Currently being processed by the DMA */
34 	IIO_BLOCK_STATE_DONE,	/* Transfer complete, on the outgoing queue */
35 	IIO_BLOCK_STATE_DEAD,	/* Marked as to be freed */
36 };
37 
38 /**
39  * struct iio_dma_buffer_block - IIO buffer block
40  * @head: List head
41  * @size: Total size of the block in bytes
42  * @bytes_used: Number of bytes that contain valid data
43  * @vaddr: Virtual address of the block's memory
44  * @phys_addr: Physical address of the block's memory
45  * @queue: Parent DMA buffer queue
46  * @kref: kref used to manage the lifetime of block
47  * @state: Current state of the block
48  * @cyclic: True if this is a cyclic buffer
49  * @fileio: True if this buffer is used for fileio mode
50  * @sg_table: DMA table for the transfer when transferring a DMABUF
51  * @fence: DMA fence to be signaled when a DMABUF transfer is complete
52  */
53 struct iio_dma_buffer_block {
54 	/* May only be accessed by the owner of the block */
55 	struct list_head head;
56 	size_t bytes_used;	/* Number of bytes containing valid data */
57 
58 	/*
59 	 * Set during allocation, constant thereafter. May be accessed read-only
60 	 * by anybody holding a reference to the block.
61 	 */
62 	void *vaddr;		/* Virtual address of the block's memory */
63 	dma_addr_t phys_addr;	/* Physical address of the block's memory */
64 	size_t size;		/* Total size of the block in bytes */
65 	struct iio_dma_buffer_queue *queue;	/* Parent DMA buffer queue */
66 
67 	/* Must not be accessed outside the core. */
68 	struct kref kref;
69 	/*
70 	 * Must not be accessed outside the core. Access needs to hold
71 	 * queue->list_lock if the block is not owned by the core.
72 	 */
73 	enum iio_block_state state;
74 
75 	bool cyclic;	/* True if this is a cyclic buffer */
76 	bool fileio;	/* True if this block is used for fileio mode */
77 
78 	struct sg_table *sg_table;	/* DMA table for a DMABUF transfer */
79 	struct dma_fence *fence;	/* Signaled when a DMABUF transfer completes */
80 };
81 
82 /**
83  * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
84  * @blocks: Buffer blocks used for fileio
85  * @active_block: Block being used in read()
86  * @pos: Read offset in the active block
87  * @block_size: Size of each block
88  * @next_dequeue: index of next block that will be dequeued
89  * @enabled: Whether the buffer is operating in fileio mode
90  */
91 struct iio_dma_buffer_queue_fileio {
92 	struct iio_dma_buffer_block *blocks[2];	/* Buffer blocks used for fileio */
93 	struct iio_dma_buffer_block *active_block;	/* Block being used in read() */
94 	size_t pos;		/* Read offset in the active block */
95 	size_t block_size;	/* Size of each block */
96 
97 	unsigned int next_dequeue;	/* Index of the next block to be dequeued */
98 	bool enabled;		/* Whether the buffer is operating in fileio mode */
99 };
100 
101 /**
102  * struct iio_dma_buffer_queue - DMA buffer base structure
103  * @buffer: IIO buffer base structure
104  * @dev: Parent device
105  * @ops: DMA buffer callbacks
106  * @lock: Protects the incoming list, active and the fields in the fileio
107  *   substruct
108  * @list_lock: Protects lists that contain blocks which can be modified in
109  *   atomic context as well as blocks on those lists. This is the outgoing queue
110  *   list and typically also a list of active blocks in the part that handles
111  *   the DMA controller
112  * @incoming: List of buffers on the incoming queue
113  * @active: Whether the buffer is currently active
114  * @num_dmabufs: Total number of DMABUFs attached to this queue
115  * @fileio: FileIO state
116  */
117 struct iio_dma_buffer_queue {
118 	struct iio_buffer buffer;	/* IIO buffer base structure */
119 	struct device *dev;		/* Parent device */
120 	const struct iio_dma_buffer_ops *ops;	/* DMA buffer callbacks */
121 
122 	/*
123 	 * A mutex to protect accessing, configuring (eg: enqueuing DMA blocks)
124 	 * and do file IO on struct iio_dma_buffer_queue objects.
125 	 */
126 	struct mutex lock;
127 	/* A spin lock to protect adding/removing blocks to the queue list */
128 	spinlock_t list_lock;
129 	struct list_head incoming;	/* List of blocks on the incoming queue */
130 
131 	bool active;		/* Whether the buffer is currently active */
132 	atomic_t num_dmabufs;	/* Total number of DMABUFs attached to this queue */
133 
134 	struct iio_dma_buffer_queue_fileio fileio;	/* FileIO state */
135 };
136 
137 /**
138  * struct iio_dma_buffer_ops - DMA buffer callback operations
139  * @submit: Called when a block is submitted to the DMA controller
140  * @abort: Should abort all pending transfers
141  */
142 struct iio_dma_buffer_ops {
	/* Called when a block is submitted to the DMA controller. */
143 	int (*submit)(struct iio_dma_buffer_queue *queue,
144 		      struct iio_dma_buffer_block *block);
	/* Should abort all pending transfers. */
145 	void (*abort)(struct iio_dma_buffer_queue *queue);
146 };
147 
/* Per-block completion / abort notifications (take a block or a block list). */
148 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
149 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
150 				     struct list_head *list);
151 
/*
 * Buffer operations on the embedded struct iio_buffer: enable/disable,
 * file-IO read/write, fill-level query, and sizing/update hooks.
 */
152 int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev);
153 int iio_dma_buffer_disable(struct iio_buffer *buffer,
154 			   struct iio_dev *indio_dev);
155 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
156 			char __user *user_buffer);
157 int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
158 			 const char __user *user_buffer);
159 size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
160 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
161 int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
162 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
163 
/* Queue lifetime: initialization, teardown, and final release. */
164 void iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dev,
165 			 const struct iio_dma_buffer_ops *ops);
166 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
167 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
168 
/* DMABUF support: attach/detach, enqueue, queue locking, and DMA device lookup. */
169 struct iio_dma_buffer_block *
170 iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
171 			     struct dma_buf_attachment *attach);
172 void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
173 				  struct iio_dma_buffer_block *block);
174 int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
175 				  struct iio_dma_buffer_block *block,
176 				  struct dma_fence *fence,
177 				  struct sg_table *sgt,
178 				  size_t size, bool cyclic);
179 void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
180 void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
181 struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
182 
183 #endif
184