Lines Matching full:queue
33 * means of two queues: the incoming queue and the outgoing queue. Blocks on the
34 * incoming queue are waiting for the DMA controller to pick them up and fill
35 * them with data. Blocks on the outgoing queue have been filled with data and
51 * incoming or outgoing queue, the block will be freed.
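
Taken together, these comment hits describe the block life cycle: the core hands blocks from the incoming queue to the driver through the submit callback, and filled blocks flow back to the outgoing side where userspace consumes them. Below is a minimal sketch of that producer side, assuming a hypothetical dmaengine-backed driver; the example_dma_buffer wrapper, channel field and callback names are illustrative and not part of this file (the completion side is sketched further below next to iio_dma_buffer_block_done()).

#include <linux/dmaengine.h>
#include <linux/iio/buffer-dma.h>

/* Hypothetical driver state embedding the DMA buffer queue. */
struct example_dma_buffer {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;	/* blocks currently owned by the hardware */
};

static void example_dma_done(void *data);	/* see the sketch further below */

/* ops->submit: called for each block handed over from the incoming queue. */
static int example_dma_submit(struct iio_dma_buffer_queue *queue,
			      struct iio_dma_buffer_block *block)
{
	struct example_dma_buffer *buf =
		container_of(queue, struct example_dma_buffer, queue);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Program a device-to-memory transfer into the block's DMA memory. */
	desc = dmaengine_prep_slave_single(buf->chan, block->phys_addr,
					   block->size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_dma_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	/* Remember the block so the abort path can return it to the core. */
	spin_lock_irq(&queue->list_lock);
	list_add_tail(&block->head, &buf->active);
	spin_unlock_irq(&queue->list_lock);

	dma_async_issue_pending(buf->chan);
	return 0;
}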
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
103 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
166 struct iio_dma_buffer_queue *queue, size_t size) in iio_dma_buffer_alloc_block() argument
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
183 block->queue = queue; in iio_dma_buffer_alloc_block()
187 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
203 * pass back ownership of the block to the queue.
207 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done() local
210 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_done()
212 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_done()
215 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_done()
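
iio_dma_buffer_block_done() is the driver-facing completion hook for this path. A minimal sketch of the hypothetical completion callback referenced in the submit sketch above; the assumption that the whole block was filled, and all names, are illustrative only.

/* dmaengine completion callback wired up in example_dma_submit() above. */
static void example_dma_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	/* Drop the block from the driver's in-flight list ... */
	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);

	/* ... report how much data was produced and hand ownership back to
	 * the core, which marks the block done and wakes poll()/read(). */
	block->bytes_used = block->size;
	iio_dma_buffer_block_done(block);
}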
222 * @queue: Queue for which to complete blocks.
223 * @list: List of aborted blocks. All blocks in this list must be from @queue.
227 * hand the blocks back to the queue.
229 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_block_list_abort() argument
235 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
242 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
244 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_list_abort()
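
A sketch of how a driver's ops->abort callback might use the helper described above, continuing the hypothetical example_dma_buffer context; the driver-private active list and the dmaengine calls are assumptions of the sketch, not requirements of this file.

/* ops->abort: stop the hardware and give all in-flight blocks back. */
static void example_dma_abort(struct iio_dma_buffer_queue *queue)
{
	struct example_dma_buffer *buf =
		container_of(queue, struct example_dma_buffer, queue);

	/* Quiesce the hardware first so no further completions can race. */
	dmaengine_terminate_sync(buf->chan);

	/* Then return every block that was still in flight to the core. */
	iio_dma_buffer_block_list_abort(queue, &buf->active);
}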
273 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_request_update() local
285 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * in iio_dma_buffer_request_update()
286 queue->buffer.length, 2); in iio_dma_buffer_request_update()
288 mutex_lock(&queue->lock); in iio_dma_buffer_request_update()
291 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) in iio_dma_buffer_request_update()
294 queue->fileio.block_size = size; in iio_dma_buffer_request_update()
295 queue->fileio.active_block = NULL; in iio_dma_buffer_request_update()
297 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
298 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
299 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
311 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
313 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_request_update()
315 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
316 if (queue->fileio.blocks[i]) { in iio_dma_buffer_request_update()
317 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
330 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
335 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
339 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
343 mutex_unlock(&queue->lock); in iio_dma_buffer_request_update()
349 static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_fileio_free() argument
353 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_fileio_free()
354 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_fileio_free()
355 if (!queue->fileio.blocks[i]) in iio_dma_buffer_fileio_free()
357 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_fileio_free()
359 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_fileio_free()
361 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_fileio_free()
363 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_fileio_free()
364 if (!queue->fileio.blocks[i]) in iio_dma_buffer_fileio_free()
366 iio_buffer_block_put(queue->fileio.blocks[i]); in iio_dma_buffer_fileio_free()
367 queue->fileio.blocks[i] = NULL; in iio_dma_buffer_fileio_free()
369 queue->fileio.active_block = NULL; in iio_dma_buffer_fileio_free()
372 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_submit_block() argument
382 if (!queue->ops) in iio_dma_buffer_submit_block()
387 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
416 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_enable() local
419 mutex_lock(&queue->lock); in iio_dma_buffer_enable()
420 queue->active = true; in iio_dma_buffer_enable()
421 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
423 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
425 mutex_unlock(&queue->lock); in iio_dma_buffer_enable()
442 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_disable() local
444 mutex_lock(&queue->lock); in iio_dma_buffer_disable()
445 queue->active = false; in iio_dma_buffer_disable()
447 if (queue->ops && queue->ops->abort) in iio_dma_buffer_disable()
448 queue->ops->abort(queue); in iio_dma_buffer_disable()
449 mutex_unlock(&queue->lock); in iio_dma_buffer_disable()
455 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_enqueue() argument
460 } else if (queue->active) { in iio_dma_buffer_enqueue()
461 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
464 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
469 struct iio_dma_buffer_queue *queue) in iio_dma_buffer_dequeue() argument
474 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
476 idx = queue->fileio.next_dequeue; in iio_dma_buffer_dequeue()
477 block = queue->fileio.blocks[idx]; in iio_dma_buffer_dequeue()
480 idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks); in iio_dma_buffer_dequeue()
481 queue->fileio.next_dequeue = idx; in iio_dma_buffer_dequeue()
486 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
503 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_read() local
510 mutex_lock(&queue->lock); in iio_dma_buffer_read()
512 if (!queue->fileio.active_block) { in iio_dma_buffer_read()
513 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
518 queue->fileio.pos = 0; in iio_dma_buffer_read()
519 queue->fileio.active_block = block; in iio_dma_buffer_read()
521 block = queue->fileio.active_block; in iio_dma_buffer_read()
525 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
526 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
528 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
533 queue->fileio.pos += n; in iio_dma_buffer_read()
535 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
536 queue->fileio.active_block = NULL; in iio_dma_buffer_read()
537 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
543 mutex_unlock(&queue->lock); in iio_dma_buffer_read()
558 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); in iio_dma_buffer_data_available() local
570 mutex_lock(&queue->lock); in iio_dma_buffer_data_available()
571 if (queue->fileio.active_block) in iio_dma_buffer_data_available()
572 data_available += queue->fileio.active_block->size; in iio_dma_buffer_data_available()
574 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
576 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_data_available()
577 block = queue->fileio.blocks[i]; in iio_dma_buffer_data_available()
579 if (block != queue->fileio.active_block in iio_dma_buffer_data_available()
584 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
585 mutex_unlock(&queue->lock); in iio_dma_buffer_data_available()
628 * iio_dma_buffer_init() - Initialize DMA buffer queue
629 * @queue: Buffer to initialize
631 * @ops: DMA buffer queue callback operations
633 * The DMA device will be used by the queue to do DMA memory allocations. So it
637 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_init() argument
640 iio_buffer_init(&queue->buffer); in iio_dma_buffer_init()
641 queue->buffer.length = PAGE_SIZE; in iio_dma_buffer_init()
642 queue->buffer.watermark = queue->buffer.length / 2; in iio_dma_buffer_init()
643 queue->dev = dev; in iio_dma_buffer_init()
644 queue->ops = ops; in iio_dma_buffer_init()
646 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_init()
648 mutex_init(&queue->lock); in iio_dma_buffer_init()
649 spin_lock_init(&queue->list_lock); in iio_dma_buffer_init()
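
For context, a sketch of how a driver built on this core might wire up the queue, continuing the hypothetical example_dma_buffer from the sketches above; the .submit/.abort hooks are the real driver-facing operations, while the setup function and its parameters are assumptions.

static const struct iio_dma_buffer_ops example_dma_buffer_ops = {
	.submit = example_dma_submit,
	.abort = example_dma_abort,
};

static int example_dma_buffer_setup(struct example_dma_buffer *buf,
				    struct device *dma_dev)
{
	INIT_LIST_HEAD(&buf->active);
	/* dma_dev must be the device that actually performs the transfers,
	 * since the queue uses it for the coherent block allocations. */
	return iio_dma_buffer_init(&buf->queue, dma_dev,
				   &example_dma_buffer_ops);
}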
656 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
657 * @queue: Buffer to cleanup
662 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_exit() argument
664 mutex_lock(&queue->lock); in iio_dma_buffer_exit()
666 iio_dma_buffer_fileio_free(queue); in iio_dma_buffer_exit()
667 queue->ops = NULL; in iio_dma_buffer_exit()
669 mutex_unlock(&queue->lock); in iio_dma_buffer_exit()
675 * @queue: Buffer to release
681 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_release() argument
683 mutex_destroy(&queue->lock); in iio_dma_buffer_release()
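
Continuing the same hypothetical driver, a sketch of the two-stage teardown: iio_dma_buffer_exit() when the buffer is being torn down, and iio_dma_buffer_release() from the buffer's release callback once the last reference is gone. The wrapper struct, function names and kfree of the containing object are assumptions of the sketch.

#include <linux/iio/buffer_impl.h>
#include <linux/slab.h>

static void example_dma_buffer_teardown(struct example_dma_buffer *buf)
{
	/* Frees the fileio blocks and detaches the ops; no transfers may be
	 * submitted through the queue after this point. */
	iio_dma_buffer_exit(&buf->queue);
}

/* .release callback of the buffer's iio_buffer_access_funcs, run when the
 * last reference to the buffer is dropped. */
static void example_dma_buffer_release(struct iio_buffer *buffer)
{
	struct example_dma_buffer *buf =
		container_of(buffer, struct example_dma_buffer, queue.buffer);

	iio_dma_buffer_release(&buf->queue);
	kfree(buf);
}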