Lines Matching full:queue
37 * means of two queues: the incoming queue and the outgoing queue. Blocks on the
38 * incoming queue are waiting for the DMA controller to pick them up and fill
39 * them with data. Blocks on the outgoing queue have been filled with data and
55 * incoming or outgoing queue, the block will be freed.
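The block lifecycle described above is driven through a pair of driver callbacks. For reference, this is the ops structure from include/linux/iio/buffer-dma.h that a DMA controller driver fills in: submit() starts a transfer for one block taken off the incoming queue, and the optional abort() cancels all in-flight transfers.

struct iio_dma_buffer_ops {
        int (*submit)(struct iio_dma_buffer_queue *queue,
                      struct iio_dma_buffer_block *block);
        void (*abort)(struct iio_dma_buffer_queue *queue);
};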
101 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local
106 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
109 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release()
112 iio_buffer_put(&queue->buffer); in iio_buffer_block_release()
175 struct iio_dma_buffer_queue *queue, size_t size, bool fileio) in iio_dma_buffer_alloc_block() argument
184 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
195 block->queue = queue; in iio_dma_buffer_alloc_block()
199 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
202 atomic_inc(&queue->num_dmabufs); in iio_dma_buffer_alloc_block()
213 static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_queue_wake() argument
217 if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) in iio_dma_buffer_queue_wake()
222 wake_up_interruptible_poll(&queue->buffer.pollq, flags); in iio_dma_buffer_queue_wake()
230 * pass back ownership of the block to the queue.
234 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done() local
240 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_done()
242 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_done()
248 iio_dma_buffer_queue_wake(queue); in iio_dma_buffer_block_done()
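iio_dma_buffer_block_done() is typically called from the driver's DMA completion handler. A minimal sketch of such a handler, loosely modeled on drivers/iio/buffer/industrialio-buffer-dmaengine.c (the function name is illustrative and driver-side list bookkeeping is elided):

/* dmaengine completion callback: hand the finished block back to the
 * core, which moves it to the outgoing queue (or re-queues it for
 * capture) and wakes any poll()ers. */
static void example_buffer_block_done(void *data)
{
        struct iio_dma_buffer_block *block = data;

        iio_dma_buffer_block_done(block);
}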
256 * @queue: Queue for which to complete blocks.
257 * @list: List of aborted blocks. All blocks in this list must be from @queue.
261 * hand the blocks back to the queue.
263 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_block_list_abort() argument
272 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
282 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
284 if (queue->fileio.enabled) in iio_dma_buffer_block_list_abort()
285 queue->fileio.enabled = false; in iio_dma_buffer_block_list_abort()
287 iio_dma_buffer_queue_wake(queue); in iio_dma_buffer_block_list_abort()
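iio_dma_buffer_block_list_abort() is the counterpart used from a driver's abort() callback: the driver stops the hardware first, then hands back every block it still owns on a private list. A sketch, assuming a hypothetical example_buffer wrapper with an active-blocks list and a dmaengine channel:

static void example_buffer_abort(struct iio_dma_buffer_queue *queue)
{
        struct example_buffer *eb = to_example_buffer(&queue->buffer);

        /* Stop the engine first so no completion callback can race. */
        dmaengine_terminate_sync(eb->chan);

        /* Return all still-active blocks to the core as aborted. */
        iio_dma_buffer_block_list_abort(queue, &eb->active);
}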
308 static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_can_use_fileio() argument
311 * Note that queue->num_dmabufs cannot increase while the queue is in iio_dma_buffer_can_use_fileio()
315 return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs); in iio_dma_buffer_can_use_fileio()
327 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_request_update() local
339 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * in iio_dma_buffer_request_update()
340 queue->buffer.length, 2); in iio_dma_buffer_request_update()
342 mutex_lock(&queue->lock); in iio_dma_buffer_request_update()
344 queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue); in iio_dma_buffer_request_update()
347 if (!queue->fileio.enabled) in iio_dma_buffer_request_update()
351 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) in iio_dma_buffer_request_update()
354 queue->fileio.block_size = size; in iio_dma_buffer_request_update()
355 queue->fileio.active_block = NULL; in iio_dma_buffer_request_update()
357 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
358 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
359 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
371 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
373 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_request_update()
375 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
376 if (queue->fileio.blocks[i]) { in iio_dma_buffer_request_update()
377 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
390 block = iio_dma_buffer_alloc_block(queue, size, true); in iio_dma_buffer_request_update()
395 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
410 if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) { in iio_dma_buffer_request_update()
412 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
419 mutex_unlock(&queue->lock); in iio_dma_buffer_request_update()
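To make the DIV_ROUND_UP() sizing above concrete: with bytes_per_datum = 4 and a buffer length of 1024 samples, each fileio block is DIV_ROUND_UP(4 * 1024, 2) = 2048 bytes, i.e. half the configured buffer, and the allocation is then rounded up to a full page by the PAGE_ALIGN() in iio_dma_buffer_alloc_block(). Sizing the blocks at half the buffer gives classic double buffering: one block can be in flight on the DMA controller while another is drained by the application.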
425 static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_fileio_free() argument
429 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_fileio_free()
430 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_fileio_free()
431 if (!queue->fileio.blocks[i]) in iio_dma_buffer_fileio_free()
433 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_fileio_free()
435 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_fileio_free()
437 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_fileio_free()
439 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_fileio_free()
440 if (!queue->fileio.blocks[i]) in iio_dma_buffer_fileio_free()
442 iio_buffer_block_put(queue->fileio.blocks[i]); in iio_dma_buffer_fileio_free()
443 queue->fileio.blocks[i] = NULL; in iio_dma_buffer_fileio_free()
445 queue->fileio.active_block = NULL; in iio_dma_buffer_fileio_free()
448 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_submit_block() argument
458 if (!queue->ops) in iio_dma_buffer_submit_block()
464 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
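queue->ops->submit() is where the driver actually programs the hardware with block->phys_addr. A condensed, hedged sketch of a dmaengine-style submit() (the example_buffer names are illustrative; the real implementation lives in drivers/iio/buffer/industrialio-buffer-dmaengine.c and also tracks the block on an active list):

static int example_buffer_submit(struct iio_dma_buffer_queue *queue,
                                 struct iio_dma_buffer_block *block)
{
        struct example_buffer *eb = to_example_buffer(&queue->buffer);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* Map the block's DMA address to one slave transfer. */
        desc = dmaengine_prep_slave_single(eb->chan, block->phys_addr,
                                           block->size, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        /* Completion returns the block via iio_dma_buffer_block_done(). */
        desc->callback = example_buffer_block_done;
        desc->callback_param = block;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return dma_submit_error(cookie);

        dma_async_issue_pending(eb->chan);

        return 0;
}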
496 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_enable() local
499 mutex_lock(&queue->lock); in iio_dma_buffer_enable()
500 queue->active = true; in iio_dma_buffer_enable()
501 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
503 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
505 mutex_unlock(&queue->lock); in iio_dma_buffer_enable()
522 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_disable() local
524 mutex_lock(&queue->lock); in iio_dma_buffer_disable()
525 queue->active = false; in iio_dma_buffer_disable()
527 if (queue->ops && queue->ops->abort) in iio_dma_buffer_disable()
528 queue->ops->abort(queue); in iio_dma_buffer_disable()
529 mutex_unlock(&queue->lock); in iio_dma_buffer_disable()
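Drivers do not call iio_dma_buffer_enable()/iio_dma_buffer_disable() directly; they are plugged into the buffer's access functions. A trimmed sketch of that wiring, following the pattern of the dmaengine buffer (the field set is abbreviated, and example_buffer_release is the hypothetical release callback sketched at the end of this listing):

static const struct iio_buffer_access_funcs example_buffer_access_funcs = {
        .read = iio_dma_buffer_read,
        .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
        .set_length = iio_dma_buffer_set_length,
        .request_update = iio_dma_buffer_request_update,
        .enable = iio_dma_buffer_enable,
        .disable = iio_dma_buffer_disable,
        .data_available = iio_dma_buffer_data_available,
        .release = example_buffer_release,

        .modes = INDIO_BUFFER_HARDWARE,
        .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};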
535 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_enqueue() argument
540 } else if (queue->active) { in iio_dma_buffer_enqueue()
541 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
544 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
549 struct iio_dma_buffer_queue *queue) in iio_dma_buffer_dequeue() argument
554 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
556 idx = queue->fileio.next_dequeue; in iio_dma_buffer_dequeue()
557 block = queue->fileio.blocks[idx]; in iio_dma_buffer_dequeue()
560 idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks); in iio_dma_buffer_dequeue()
561 queue->fileio.next_dequeue = idx; in iio_dma_buffer_dequeue()
566 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
574 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_io() local
582 mutex_lock(&queue->lock); in iio_dma_buffer_io()
584 if (!queue->fileio.active_block) { in iio_dma_buffer_io()
585 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_io()
590 queue->fileio.pos = 0; in iio_dma_buffer_io()
591 queue->fileio.active_block = block; in iio_dma_buffer_io()
593 block = queue->fileio.active_block; in iio_dma_buffer_io()
597 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_io()
598 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_io()
599 addr = block->vaddr + queue->fileio.pos; in iio_dma_buffer_io()
610 queue->fileio.pos += n; in iio_dma_buffer_io()
612 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_io()
613 queue->fileio.active_block = NULL; in iio_dma_buffer_io()
614 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_io()
620 mutex_unlock(&queue->lock); in iio_dma_buffer_io()
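From userspace, this fileio path is simply the buffer character device: read() drains one outgoing block at a time, with fileio.pos letting a short read resume mid-block. A minimal consumer sketch (the device path and sample type are assumptions; the actual scan layout depends on the enabled channels):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int read_samples(void)
{
        int16_t samples[512];
        ssize_t n;
        int fd;

        fd = open("/dev/iio:device0", O_RDONLY); /* assumed device node */
        if (fd < 0)
                return -1;

        /* Blocks until a filled block is available on the outgoing queue. */
        n = read(fd, samples, sizeof(samples));

        close(fd);
        return n < 0 ? -1 : 0;
}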
668 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); in iio_dma_buffer_usage() local
680 mutex_lock(&queue->lock); in iio_dma_buffer_usage()
681 if (queue->fileio.active_block) in iio_dma_buffer_usage()
682 data_available += queue->fileio.active_block->size; in iio_dma_buffer_usage()
684 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_usage()
686 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_usage()
687 block = queue->fileio.blocks[i]; in iio_dma_buffer_usage()
689 if (block != queue->fileio.active_block in iio_dma_buffer_usage()
694 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_usage()
695 mutex_unlock(&queue->lock); in iio_dma_buffer_usage()
705 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_attach_dmabuf() local
708 guard(mutex)(&queue->lock); in iio_dma_buffer_attach_dmabuf()
714 if (queue->fileio.enabled) in iio_dma_buffer_attach_dmabuf()
717 block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false); in iio_dma_buffer_attach_dmabuf()
722 iio_dma_buffer_fileio_free(queue); in iio_dma_buffer_attach_dmabuf()
738 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_can_enqueue_block() local
741 if (queue->fileio.enabled) in iio_dma_can_enqueue_block()
763 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_enqueue_dmabuf() local
767 WARN_ON(!mutex_is_locked(&queue->lock)); in iio_dma_buffer_enqueue_dmabuf()
780 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_enqueue_dmabuf()
791 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_lock_queue() local
793 mutex_lock(&queue->lock); in iio_dma_buffer_lock_queue()
799 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_unlock_queue() local
801 mutex_unlock(&queue->lock); in iio_dma_buffer_unlock_queue()
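The attach/enqueue/lock helpers above back the IIO DMABUF uAPI, where userspace attaches an externally allocated dma-buf to the buffer fd and enqueues it for transfer. A hedged sketch of that flow (ioctl and struct names are from include/uapi/linux/iio/buffer.h as merged in recent kernels; treat the details as assumptions and check the header for your kernel version):

#include <sys/ioctl.h>
#include <linux/iio/buffer.h>

/* buf_fd: buffer fd obtained via IIO_BUFFER_GET_FD_IOCTL.
 * dmabuf_fd: an external dma-buf, e.g. from a DMA heap. Sketch only. */
int enqueue_dmabuf(int buf_fd, int dmabuf_fd, __u64 bytes)
{
        struct iio_dmabuf req = {
                .fd = dmabuf_fd,
                .bytes_used = bytes,
        };

        if (ioctl(buf_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd) < 0)
                return -1;

        return ioctl(buf_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);
}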
842 * iio_dma_buffer_init() - Initialize DMA buffer queue
843 * @queue: Buffer to initialize
845 * @ops: DMA buffer queue callback operations
847 * The DMA device will be used by the queue to do DMA memory allocations. So it
851 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_init() argument
854 iio_buffer_init(&queue->buffer); in iio_dma_buffer_init()
855 queue->buffer.length = PAGE_SIZE; in iio_dma_buffer_init()
856 queue->buffer.watermark = queue->buffer.length / 2; in iio_dma_buffer_init()
857 queue->dev = dev; in iio_dma_buffer_init()
858 queue->ops = ops; in iio_dma_buffer_init()
860 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_init()
862 mutex_init(&queue->lock); in iio_dma_buffer_init()
863 spin_lock_init(&queue->list_lock); in iio_dma_buffer_init()
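Putting it together at probe time: the driver embeds the queue in its own buffer structure and registers its ops. A sketch with the same hypothetical example_buffer wrapper:

static int example_buffer_setup(struct example_buffer *eb, struct device *dev)
{
        static const struct iio_dma_buffer_ops example_dma_ops = {
                .submit = example_buffer_submit,
                .abort = example_buffer_abort,
        };

        /* dev must be the DMA-capable device: the queue uses it for the
         * dma_alloc_coherent() calls that back each block. */
        return iio_dma_buffer_init(&eb->queue, dev, &example_dma_ops);
}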
870 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
871 * @queue: Buffer to cleanup
876 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_exit() argument
878 mutex_lock(&queue->lock); in iio_dma_buffer_exit()
880 iio_dma_buffer_fileio_free(queue); in iio_dma_buffer_exit()
881 queue->ops = NULL; in iio_dma_buffer_exit()
883 mutex_unlock(&queue->lock); in iio_dma_buffer_exit()
889 * @queue: Buffer to release
895 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_release() argument
897 mutex_destroy(&queue->lock); in iio_dma_buffer_release()
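Teardown is two-stage: iio_dma_buffer_exit() runs when the driver is removed, while iio_dma_buffer_release() must be called from the buffer's .release callback once the last reference is dropped. A sketch of the latter with the same hypothetical wrapper:

static void example_buffer_release(struct iio_buffer *buf)
{
        struct example_buffer *eb = to_example_buffer(buf);

        /* The last iio_buffer reference is gone: destroy the queue and
         * free the embedding structure. */
        iio_dma_buffer_release(&eb->queue);
        kfree(eb);
}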