Lines Matching full:block
26 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
27 * has its own memory buffer. The size of the block is the granularity at which
29 * basic unit of data exchange from one sample to one block decreases the
32 * sample the overhead will be x for each sample. Whereas when using a block
40 * them with data. Blocks on the outgoing queue have been filled with data and
43 * A block can be in one of the following states:
45 * the block.
48 * * Owned by the DMA controller: The DMA controller is processing the block
53 * * Dead: A block that is dead has been marked to be freed. It might still
56 * incoming or outgoing queue the block will be freed.
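
In the code these states map onto enum iio_block_state. A minimal sketch, mirroring the definitions in include/linux/iio/buffer-dma.h (note that "owned by the application" has no state of its own; it is simply a DONE block that has been dequeued):

    enum iio_block_state {
        IIO_BLOCK_STATE_QUEUED, /* on the incoming list */
        IIO_BLOCK_STATE_ACTIVE, /* owned by the DMA controller */
        IIO_BLOCK_STATE_DONE,   /* processed, data ready for the application */
        IIO_BLOCK_STATE_DEAD,   /* marked to be freed */
    };
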
59 * with both the block structure as well as the storage memory for the block
60 * will be freed when the last reference to the block is dropped. This means a
61 * block must not be accessed without holding a reference.
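
The fields referenced throughout these fragments live in struct iio_dma_buffer_block. A simplified sketch following include/linux/iio/buffer-dma.h; the exact layout differs between kernel versions:

    struct iio_dma_buffer_block {
        struct list_head head;      /* entry in the queue's incoming list */
        size_t bytes_used;          /* valid bytes once the transfer is done */
        void *vaddr;                /* kernel mapping of the DMA memory */
        dma_addr_t phys_addr;       /* device address for the DMA engine */
        size_t size;                /* capacity of the DMA memory */
        struct iio_dma_buffer_queue *queue;
        struct kref kref;           /* the last put frees struct and memory */
        enum iio_block_state state;
        bool fileio;                /* backs read()/write() rather than a DMABUF */
        struct dma_fence *fence;    /* DMABUF path only */
        struct sg_table *sg_table;  /* DMABUF path only */
    };
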
69 * converter to the memory region of the block. Once the DMA transfer has been
71 * block.
73 * Prior to this it must set the bytes_used field of the block to contain
75 * size of the block, but if the DMA hardware has certain alignment requirements
78 * datum, i.e. the block must not contain partial samples.
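
To illustrate that rule: a driver whose DMA engine has a length-alignment requirement can round the transfer length down to a value that satisfies both constraints at once. A hedged sketch; dma_align and bytes_per_datum are placeholders for whatever the hardware and the scan layout dictate, and lcm() comes from linux/lcm.h:

    /*
     * Transfer as much of the block as allowed, but only whole samples:
     * round down to a common multiple of the DMA engine's alignment and
     * the bytes per datum.
     */
    block->bytes_used = rounddown(block->size,
                                  lcm(dma_align, bytes_per_datum));
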
80 * The driver must call iio_dma_buffer_block_done() for each block it has
82 * perform a DMA transfer for the block, e.g. because the buffer was disabled
83 * before the block transfer was started. In this case it should set bytes_used to 0.
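
Putting the contract together, a driver's submit() callback and its completion handler might look roughly like the following. This is a sketch modeled on the in-tree dmaengine glue (drivers/iio/buffer/industrialio-buffer-dmaengine.c), not this file's code; my_chan and my_align are assumptions standing in for the driver's own DMA channel and alignment constraint:

    #include <linux/dmaengine.h>
    #include <linux/iio/buffer-dma.h>

    static void my_dma_done(void *data)
    {
        struct iio_dma_buffer_block *block = data;

        /* Hand ownership back to the core; runs from the DMA callback. */
        iio_dma_buffer_block_done(block);
    }

    static int my_submit(struct iio_dma_buffer_queue *queue,
                         struct iio_dma_buffer_block *block)
    {
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* Whole samples only, per the bytes_used rules above. */
        block->bytes_used = rounddown(block->size, my_align);
        if (!block->bytes_used)
            return -EINVAL;

        /* Capture direction; an output buffer would use DMA_MEM_TO_DEV. */
        desc = dmaengine_prep_slave_single(my_chan, block->phys_addr,
                                           block->bytes_used, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
        if (!desc)
            return -ENOMEM;

        desc->callback = my_dma_done;
        desc->callback_param = block;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
            return dma_submit_error(cookie);

        dma_async_issue_pending(my_chan);
        return 0;
    }
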
100 struct iio_dma_buffer_block *block = container_of(kref, in iio_buffer_block_release() local
102 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release()
104 WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD); in iio_buffer_block_release()
106 if (block->fileio) { in iio_buffer_block_release()
107 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
108 block->vaddr, block->phys_addr); in iio_buffer_block_release()
114 kfree(block); in iio_buffer_block_release()
117 static void iio_buffer_block_get(struct iio_dma_buffer_block *block) in iio_buffer_block_get() argument
119 kref_get(&block->kref); in iio_buffer_block_get()
122 static void iio_buffer_block_put(struct iio_dma_buffer_block *block) in iio_buffer_block_put() argument
124 kref_put(&block->kref, iio_buffer_block_release); in iio_buffer_block_put()
136 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_cleanup_worker() local
142 list_for_each_entry_safe(block, _block, &block_list, head) in iio_dma_buffer_cleanup_worker()
143 iio_buffer_block_release(&block->kref); in iio_dma_buffer_cleanup_worker()
149 struct iio_dma_buffer_block *block; in iio_buffer_block_release_atomic() local
151 block = container_of(kref, struct iio_dma_buffer_block, kref); in iio_buffer_block_release_atomic()
154 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); in iio_buffer_block_release_atomic()
162 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) in iio_buffer_block_put_atomic() argument
164 kref_put(&block->kref, iio_buffer_block_release_atomic); in iio_buffer_block_put_atomic()
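
The _atomic variant exists because the final put may happen from the DMA completion path, i.e. in atomic context, where dma_free_coherent() must not be called. Freeing is therefore deferred: the block is parked on a dead list and a work item releases it later. The supporting declarations, elided from the match list but consistent with the identifiers visible above:

    static LIST_HEAD(iio_dma_buffer_dead_blocks);
    static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
    static DECLARE_WORK(iio_dma_buffer_cleanup_work,
                        iio_dma_buffer_cleanup_worker);
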
176 struct iio_dma_buffer_block *block __free(kfree) = in iio_dma_buffer_alloc_block()
177 kzalloc_obj(*block); in iio_dma_buffer_alloc_block()
178 if (!block) in iio_dma_buffer_alloc_block()
182 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
183 &block->phys_addr, GFP_KERNEL); in iio_dma_buffer_alloc_block()
184 if (!block->vaddr) in iio_dma_buffer_alloc_block()
188 block->fileio = fileio; in iio_dma_buffer_alloc_block()
189 block->size = size; in iio_dma_buffer_alloc_block()
190 block->state = IIO_BLOCK_STATE_DONE; in iio_dma_buffer_alloc_block()
191 block->queue = queue; in iio_dma_buffer_alloc_block()
192 INIT_LIST_HEAD(&block->head); in iio_dma_buffer_alloc_block()
193 kref_init(&block->kref); in iio_dma_buffer_alloc_block()
200 return_ptr(block); in iio_dma_buffer_alloc_block()
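
The __free(kfree) annotation and return_ptr() above come from linux/cleanup.h: kfree() runs automatically when the pointer goes out of scope, and return_ptr() transfers ownership to the caller so the automatic cleanup is skipped. That is why the failure paths in this function can return without an explicit kfree(). A generic sketch of the pattern; setup() is a hypothetical helper:

    struct foo *foo __free(kfree) = kzalloc(sizeof(*foo), GFP_KERNEL);

    if (!foo)
        return NULL;        /* nothing was allocated, kfree(NULL) is a no-op */
    if (setup(foo))
        return NULL;        /* foo is kfree()d automatically here */
    return_ptr(foo);        /* success: hand foo to the caller un-freed */
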
203 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in _iio_dma_buffer_block_done() argument
205 if (block->state != IIO_BLOCK_STATE_DEAD) in _iio_dma_buffer_block_done()
206 block->state = IIO_BLOCK_STATE_DONE; in _iio_dma_buffer_block_done()
222 * iio_dma_buffer_block_done() - Indicate that a block has been completed
223 * @block: The completed block
225 * Should be called when the DMA controller has finished handling the block to
226 * pass back ownership of the block to the queue.
228 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in iio_dma_buffer_block_done() argument
230 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done()
236 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_done()
238 if (!block->fileio) in iio_dma_buffer_block_done()
239 iio_buffer_signal_dmabuf_done(block->fence, 0); in iio_dma_buffer_block_done()
241 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_done()
248 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
254 * stopped. This will set bytes_used to 0 for each block in the list and then
260 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_block_list_abort() local
266 list_for_each_entry_safe(block, _block, list, head) { in iio_dma_buffer_block_list_abort()
267 list_del(&block->head); in iio_dma_buffer_block_list_abort()
268 block->bytes_used = 0; in iio_dma_buffer_block_list_abort()
269 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_list_abort()
271 if (!block->fileio) in iio_dma_buffer_block_list_abort()
272 iio_buffer_signal_dmabuf_done(block->fence, in iio_dma_buffer_block_list_abort()
274 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_list_abort()
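
As the kerneldoc above says, this is typically called from the driver's abort() callback once the DMA controller has been stopped. A hedged sketch in the style of the dmaengine glue; my_chan and my_active_blocks (the driver's own list of in-flight blocks) are assumptions:

    static void my_abort(struct iio_dma_buffer_queue *queue)
    {
        /* Stop the engine first so no completion races the abort... */
        dmaengine_terminate_sync(my_chan);

        /* ...then hand every in-flight block back with bytes_used = 0. */
        iio_dma_buffer_block_list_abort(queue, &my_active_blocks);
    }
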
286 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) in iio_dma_block_reusable() argument
289 * If the core owns the block it can be re-used. This should be the in iio_dma_block_reusable()
291 * not support abort and has not given back the block yet. in iio_dma_block_reusable()
293 switch (block->state) { in iio_dma_block_reusable()
322 struct iio_dma_buffer_block *block; in iio_dma_buffer_request_update() local
329 * buffering scheme with usually one block at a time being used by the in iio_dma_buffer_request_update()
352 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
355 if (block && (!iio_dma_block_reusable(block) || !try_reuse)) in iio_dma_buffer_request_update()
356 block->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_request_update()
370 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
371 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_request_update()
373 iio_buffer_block_put(block); in iio_dma_buffer_request_update()
374 block = NULL; in iio_dma_buffer_request_update()
376 block->size = size; in iio_dma_buffer_request_update()
379 block = NULL; in iio_dma_buffer_request_update()
382 if (!block) { in iio_dma_buffer_request_update()
383 block = iio_dma_buffer_alloc_block(queue, size, true); in iio_dma_buffer_request_update()
384 if (!block) in iio_dma_buffer_request_update()
387 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
391 * block->bytes_used may have been modified previously, e.g. by in iio_dma_buffer_request_update()
393 * block's size so that iio_dma_buffer_io() will work. in iio_dma_buffer_request_update()
395 block->bytes_used = block->size; in iio_dma_buffer_request_update()
398 * If it's an input buffer, mark the block as queued, and in iio_dma_buffer_request_update()
403 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_request_update()
404 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
406 block->state = IIO_BLOCK_STATE_DONE; in iio_dma_buffer_request_update()
438 struct iio_dma_buffer_block *block) in iio_dma_buffer_submit_block() argument
443 * If the hardware has already been removed we put the block into in iio_dma_buffer_submit_block()
450 block->state = IIO_BLOCK_STATE_ACTIVE; in iio_dma_buffer_submit_block()
451 iio_buffer_block_get(block); in iio_dma_buffer_submit_block()
453 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
455 if (!block->fileio) in iio_dma_buffer_submit_block()
456 iio_buffer_signal_dmabuf_done(block->fence, ret); in iio_dma_buffer_submit_block()
468 iio_buffer_block_put(block); in iio_dma_buffer_submit_block()
485 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_enable() local
489 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
490 list_del(&block->head); in iio_dma_buffer_enable()
491 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
521 struct iio_dma_buffer_block *block) in iio_dma_buffer_enqueue() argument
523 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_enqueue()
524 iio_buffer_block_put(block); in iio_dma_buffer_enqueue()
526 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
528 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_enqueue()
529 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
536 struct iio_dma_buffer_block *block; in iio_dma_buffer_dequeue() local
542 block = queue->fileio.blocks[idx]; in iio_dma_buffer_dequeue()
544 if (block->state != IIO_BLOCK_STATE_DONE) in iio_dma_buffer_dequeue()
550 return block; in iio_dma_buffer_dequeue()
557 struct iio_dma_buffer_block *block; in iio_dma_buffer_io() local
567 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_io()
568 if (!block) in iio_dma_buffer_io()
572 queue->fileio.active_block = block; in iio_dma_buffer_io()
574 block = queue->fileio.active_block; in iio_dma_buffer_io()
578 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_io()
579 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_io()
580 addr = block->vaddr + queue->fileio.pos; in iio_dma_buffer_io()
591 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_io()
593 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_io()
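
This fileio path is what services a plain read() on the buffer character device: bytes are copied out of the active block at fileio.pos, and once the block is exhausted it is re-queued for the DMA controller. A hedged userspace sketch; the device path is system-dependent, process() is a placeholder consumer, and the buffer must have been enabled beforehand (e.g. via sysfs):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static void read_samples(void)
    {
        uint8_t buf[4096];
        ssize_t n;
        int fd;

        fd = open("/dev/iio:device0", O_RDONLY);
        if (fd < 0)
            return;

        /* Each read() drains the currently active block; short reads at
         * block boundaries are normal. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            process(buf, n);

        close(fd);
    }
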
643 struct iio_dma_buffer_block *block; in iio_dma_buffer_usage() local
648 * For counting the available bytes we'll use the size of the block not in iio_dma_buffer_usage()
649 * the number of actual bytes available in the block. Otherwise it is in iio_dma_buffer_usage()
661 block = queue->fileio.blocks[i]; in iio_dma_buffer_usage()
663 if (block != queue->fileio.active_block && block->state == IIO_BLOCK_STATE_DONE) in iio_dma_buffer_usage()
664 data_available += block->size; in iio_dma_buffer_usage()
676 struct iio_dma_buffer_block *block; in iio_dma_buffer_attach_dmabuf() local
687 block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false); in iio_dma_buffer_attach_dmabuf()
688 if (!block) in iio_dma_buffer_attach_dmabuf()
694 return block; in iio_dma_buffer_attach_dmabuf()
699 struct iio_dma_buffer_block *block) in iio_dma_buffer_detach_dmabuf() argument
701 block->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_detach_dmabuf()
702 iio_buffer_block_put_atomic(block); in iio_dma_buffer_detach_dmabuf()
706 static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block) in iio_dma_can_enqueue_block() argument
708 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_can_enqueue_block()
714 switch (block->state) { in iio_dma_can_enqueue_block()
728 struct iio_dma_buffer_block *block, in iio_dma_buffer_enqueue_dmabuf() argument
741 ret = iio_dma_can_enqueue_block(block); in iio_dma_buffer_enqueue_dmabuf()
745 block->bytes_used = size; in iio_dma_buffer_enqueue_dmabuf()
746 block->cyclic = cyclic; in iio_dma_buffer_enqueue_dmabuf()
747 block->sg_table = sgt; in iio_dma_buffer_enqueue_dmabuf()
748 block->fence = fence; in iio_dma_buffer_enqueue_dmabuf()
750 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_enqueue_dmabuf()
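
For the zero-copy path, userspace attaches an externally allocated DMABUF to the buffer and enqueues it with bytes_used filled in; the fence passed to iio_buffer_signal_dmabuf_done() then tells userspace when the transfer has completed. A sketch using the uapi from include/uapi/linux/iio/buffer.h, with error handling elided; buffer_fd is the descriptor obtained via IIO_BUFFER_GET_FD_IOCTL and dmabuf_fd is an existing DMABUF:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iio/buffer.h>

    static void enqueue_dmabuf(int buffer_fd, int dmabuf_fd, uint64_t len)
    {
        struct iio_dmabuf req = {
            .fd = dmabuf_fd,
            .flags = 0,            /* or IIO_BUFFER_DMABUF_CYCLIC */
            .bytes_used = len,     /* subject to the alignment rules above */
        };

        ioctl(buffer_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd);
        ioctl(buffer_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);
    }
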