Lines matching +full:free +full:- +full:running in the Renesas R-Car Gen2/Gen3 DMA Controller Driver (rcar-dmac)

1 // SPDX-License-Identifier: GPL-2.0
3 * Renesas R-Car Gen2/Gen3 DMA Controller Driver
5 * Copyright (C) 2014-2019 Renesas Electronics Inc.
11 #include <linux/dma-mapping.h>
28 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
43 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
56 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
63 * @running: the transfer chunk being currently processed
80 struct rcar_dmac_xfer_chunk *running; member
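The fragments above describe two parallel views of one transfer: a software-side chunk list (struct rcar_dmac_xfer_chunk, with the running member tracking the chunk in flight) and packed hardware descriptors the controller can walk on its own. Below is a minimal illustrative pair of structures; apart from the sar/dar/tcr and src/dst/size names taken from this listing, the layout is an assumption, not the driver's exact definition.

#include <stdint.h>

/* Illustrative only: one software-side chunk, i.e. one contiguous hardware transfer. */
struct xfer_chunk_sketch {
	struct xfer_chunk_sketch *next;   /* the driver links chunks with a kernel list_head */
	uint64_t src_addr;
	uint64_t dst_addr;
	uint32_t size;                    /* bytes */
};

/* Illustrative only: the in-memory record read by the DMAC in descriptor mode. */
struct hw_desc_sketch {
	uint32_t sar;                     /* source address, low 32 bits */
	uint32_t dar;                     /* destination address, low 32 bits */
	uint32_t tcr;                     /* transfer count, in transfer-size units */
};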
97 * struct rcar_dmac_desc_page - One page worth of descriptors
112 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
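The two macros above size the per-page descriptor pools: everything in the page after the struct rcar_dmac_desc_page header is used as an array of descriptors or chunks, so the capacity is the remaining space divided by the element size. A standalone check of that arithmetic, with an assumed 4 KiB page and a made-up element size:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096u

struct desc_page_sketch {
	void *node;           /* stand-in for the page's list_head */
	char descs[];         /* descriptors (or chunks) fill the rest of the page */
};

int main(void)
{
	size_t desc_size = 128;   /* assumed size of one descriptor, for illustration */
	size_t per_page = (PAGE_SIZE_SKETCH -
			   offsetof(struct desc_page_sketch, descs)) / desc_size;

	printf("descriptors per page: %zu\n", per_page);
	return 0;
}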
119 * struct rcar_dmac_chan_slave - Slave configuration
129 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
141 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
150 * @desc.free: list of free descriptors
155 * @desc.running: the descriptor being processed (a member of the active list)
156 * @desc.chunks_free: list of free transfer chunk descriptors
173 struct list_head free; member
178 struct rcar_dmac_desc *running; member
189 * struct rcar_dmac - R-Car Gen2 DMA Controller
215 for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \
216 if (!((dmac)->channels_mask & BIT(i))) continue; else
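This channel iterator relies on the classic for/if-continue-else macro idiom, so channels absent from channels_mask are skipped while the macro still accepts a single statement as its body. The same idiom in a freestanding form, with an assumed 8-channel mask:

#include <stdio.h>

#define for_each_set_channel(i, mask, n) \
	for ((i) = 0; (i) < (n); (i)++) \
		if (!((mask) & (1u << (i)))) continue; else

int main(void)
{
	unsigned int i;
	unsigned int channels_mask = 0xfeu;  /* assumed: channel 0 reserved, 1..7 usable */

	for_each_set_channel(i, channels_mask, 8)
		printf("channel %u is usable\n", i);
	return 0;
}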
219 * struct rcar_dmac_of_data - This driver's OF data
228 /* -----------------------------------------------------------------------------
239 #define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */
302 /* For R-Car Gen4 */
308 /* -----------------------------------------------------------------------------
315 writew(data, dmac->dmac_base + reg); in rcar_dmac_write()
317 writel(data, dmac->dmac_base + reg); in rcar_dmac_write()
323 return readw(dmac->dmac_base + reg); in rcar_dmac_read()
325 return readl(dmac->dmac_base + reg); in rcar_dmac_read()
331 return readw(chan->iomem + reg); in rcar_dmac_chan_read()
333 return readl(chan->iomem + reg); in rcar_dmac_chan_read()
339 writew(data, chan->iomem + reg); in rcar_dmac_chan_write()
341 writel(data, chan->iomem + reg); in rcar_dmac_chan_write()
347 if (dmac->chan_base) in rcar_dmac_chan_clear()
350 rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index)); in rcar_dmac_chan_clear()
358 if (dmac->chan_base) { in rcar_dmac_chan_clear_all()
362 rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); in rcar_dmac_chan_clear_all()
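The register helpers above choose between 16-bit (readw/writew) and 32-bit (readl/writel) MMIO accesses depending on which register is addressed. The sketch below models that width dispatch against an ordinary byte buffer instead of device registers; is_16bit_reg() and the offsets used are assumptions for illustration only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t regs[0x100];               /* stand-in for the mapped register window */

/* Assumed predicate: which offsets are 16-bit registers. */
static int is_16bit_reg(uint32_t reg)
{
	return reg == 0x40;
}

static void chan_write(uint32_t reg, uint32_t data)
{
	if (is_16bit_reg(reg)) {
		uint16_t v = (uint16_t)data;
		memcpy(&regs[reg], &v, sizeof(v));       /* writew() in the driver */
	} else {
		memcpy(&regs[reg], &data, sizeof(data)); /* writel() in the driver */
	}
}

int main(void)
{
	chan_write(0x40, 0x1234);      /* 16-bit path */
	chan_write(0x00, 0xdeadbeef);  /* 32-bit path */
	printf("reg 0x40 = 0x%02x%02x\n", regs[0x41], regs[0x40]);
	return 0;
}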
366 /* -----------------------------------------------------------------------------
379 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_start_xfer()
380 u32 chcr = desc->chcr; in rcar_dmac_chan_start_xfer()
384 if (chan->mid_rid >= 0) in rcar_dmac_chan_start_xfer()
385 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); in rcar_dmac_chan_start_xfer()
387 if (desc->hwdescs.use) { in rcar_dmac_chan_start_xfer()
389 list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
392 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
394 chan->index, desc, desc->nchunks, &desc->hwdescs.dma); in rcar_dmac_chan_start_xfer()
398 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
400 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
402 desc->hwdescs.dma >> 32); in rcar_dmac_chan_start_xfer()
405 (desc->hwdescs.dma & 0xfffffff0) | in rcar_dmac_chan_start_xfer()
408 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | in rcar_dmac_chan_start_xfer()
419 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
434 if (!desc->cyclic) in rcar_dmac_chan_start_xfer()
440 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
449 struct rcar_dmac_xfer_chunk *chunk = desc->running; in rcar_dmac_chan_start_xfer()
451 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
452 "chan%u: queue chunk %p: %u@%pad -> %pad\n", in rcar_dmac_chan_start_xfer()
453 chan->index, chunk, chunk->size, &chunk->src_addr, in rcar_dmac_chan_start_xfer()
454 &chunk->dst_addr); in rcar_dmac_chan_start_xfer()
458 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
460 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
463 chunk->src_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
465 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
467 chunk->size >> desc->xfer_shift); in rcar_dmac_chan_start_xfer()
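When a transfer is started, 64-bit DMA addresses are split: the high word (addr >> 32) goes to the fixed-address registers and the low word (addr & 0xffffffff) to the normal address registers, while the transfer count register receives size >> xfer_shift. A quick arithmetic check of that split with assumed values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x1ffff0000ULL;    /* assumed example address above 4 GiB */
	uint32_t size = 4096;                  /* chunk size in bytes */
	unsigned int xfer_shift = 2;           /* log2 of a 4-byte transfer unit */

	uint32_t hi  = (uint32_t)(dma_addr >> 32);
	uint32_t lo  = (uint32_t)(dma_addr & 0xffffffff);
	uint32_t tcr = size >> xfer_shift;     /* count in transfer units, not bytes */

	printf("hi=0x%08x lo=0x%08x tcr=%u\n", hi, lo, tcr);
	return 0;
}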
487 dev_warn(dmac->dev, "DMAOR initialization failed.\n"); in rcar_dmac_init()
488 return -EIO; in rcar_dmac_init()
494 /* -----------------------------------------------------------------------------
500 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); in rcar_dmac_tx_submit()
505 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_tx_submit()
509 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", in rcar_dmac_tx_submit()
510 chan->index, tx->cookie, desc); in rcar_dmac_tx_submit()
512 list_add_tail(&desc->node, &chan->desc.pending); in rcar_dmac_tx_submit()
513 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
516 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_tx_submit()
521 /* -----------------------------------------------------------------------------
522 * Descriptor allocation and freeing
526 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
539 return -ENOMEM; in rcar_dmac_desc_alloc()
542 struct rcar_dmac_desc *desc = &page->descs[i]; in rcar_dmac_desc_alloc()
544 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in rcar_dmac_desc_alloc()
545 desc->async_tx.tx_submit = rcar_dmac_tx_submit; in rcar_dmac_desc_alloc()
546 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
548 list_add_tail(&desc->node, &list); in rcar_dmac_desc_alloc()
551 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_alloc()
552 list_splice_tail(&list, &chan->desc.free); in rcar_dmac_desc_alloc()
553 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_desc_alloc()
554 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_alloc()
560 * rcar_dmac_desc_put - Release a DMA transfer descriptor
565 * free descriptors lists. The descriptor's chunks list will be reinitialized to
576 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_put()
577 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
578 list_add(&desc->node, &chan->desc.free); in rcar_dmac_desc_put()
579 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_put()
594 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
595 list_splice_init(&chan->desc.wait, &list); in rcar_dmac_desc_recycle_acked()
596 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
599 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
600 list_del(&desc->node); in rcar_dmac_desc_recycle_acked()
609 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
610 list_splice(&list, &chan->desc.wait); in rcar_dmac_desc_recycle_acked()
611 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
615 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
618 * Locking: This function must be called in a non-atomic context.
632 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
634 while (list_empty(&chan->desc.free)) { in rcar_dmac_desc_get()
636 * No free descriptors, allocate a page worth of them and try in rcar_dmac_desc_get()
641 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
645 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
648 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); in rcar_dmac_desc_get()
649 list_del(&desc->node); in rcar_dmac_desc_get()
651 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
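rcar_dmac_desc_get() and rcar_dmac_xfer_chunk_get() below share one pattern: hold the channel lock, loop while the free list is empty, drop the lock to allocate another page worth of entries, retake the lock and retry, then pop the first free entry. A loose userspace model of the refill-then-pop part (locking omitted, sizes assumed):

#include <stdlib.h>
#include <stddef.h>

#define POOL_BATCH 16   /* assumed: entries added per refill ("one page worth") */
#define POOL_MAX   256

static void *pool_free[POOL_MAX];
static size_t pool_nfree;

/* Refill the free pool with one batch of freshly allocated entries. */
static int pool_refill(void)
{
	for (int i = 0; i < POOL_BATCH && pool_nfree < POOL_MAX; i++) {
		void *d = calloc(1, 64);   /* stand-in for one descriptor */
		if (!d)
			return -1;
		pool_free[pool_nfree++] = d;
	}
	return 0;
}

/* Pop a free entry, refilling first if the pool is empty. */
static void *pool_get(void)
{
	while (pool_nfree == 0)
		if (pool_refill())
			return NULL;
	return pool_free[--pool_nfree];
}

int main(void)
{
	return pool_get() ? 0 : 1;   /* the first get triggers a refill */
}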
657 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
670 return -ENOMEM; in rcar_dmac_xfer_chunk_alloc()
673 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
675 list_add_tail(&chunk->node, &list); in rcar_dmac_xfer_chunk_alloc()
678 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
679 list_splice_tail(&list, &chan->desc.chunks_free); in rcar_dmac_xfer_chunk_alloc()
680 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_xfer_chunk_alloc()
681 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
687 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
690 * Locking: This function must be called in a non-atomic context.
702 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
704 while (list_empty(&chan->desc.chunks_free)) { in rcar_dmac_xfer_chunk_get()
706 * No free descriptors, allocate a page worth of them and try in rcar_dmac_xfer_chunk_get()
711 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
715 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
718 chunk = list_first_entry(&chan->desc.chunks_free, in rcar_dmac_xfer_chunk_get()
720 list_del(&chunk->node); in rcar_dmac_xfer_chunk_get()
722 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
738 if (desc->hwdescs.size == size) in rcar_dmac_realloc_hwdesc()
741 if (desc->hwdescs.mem) { in rcar_dmac_realloc_hwdesc()
742 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, in rcar_dmac_realloc_hwdesc()
743 desc->hwdescs.mem, desc->hwdescs.dma); in rcar_dmac_realloc_hwdesc()
744 desc->hwdescs.mem = NULL; in rcar_dmac_realloc_hwdesc()
745 desc->hwdescs.size = 0; in rcar_dmac_realloc_hwdesc()
751 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, in rcar_dmac_realloc_hwdesc()
752 &desc->hwdescs.dma, GFP_NOWAIT); in rcar_dmac_realloc_hwdesc()
753 if (!desc->hwdescs.mem) in rcar_dmac_realloc_hwdesc()
756 desc->hwdescs.size = size; in rcar_dmac_realloc_hwdesc()
765 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); in rcar_dmac_fill_hwdesc()
767 hwdesc = desc->hwdescs.mem; in rcar_dmac_fill_hwdesc()
769 return -ENOMEM; in rcar_dmac_fill_hwdesc()
771 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
772 hwdesc->sar = chunk->src_addr; in rcar_dmac_fill_hwdesc()
773 hwdesc->dar = chunk->dst_addr; in rcar_dmac_fill_hwdesc()
774 hwdesc->tcr = chunk->size >> desc->xfer_shift; in rcar_dmac_fill_hwdesc()
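rcar_dmac_fill_hwdesc() resizes one coherent buffer to hold nchunks hardware descriptors and copies each chunk's source, destination and transfer count into it, with the count expressed in transfer units (size >> xfer_shift). A plain-memory version of that fill loop; the struct and function names here are illustrative only, and the driver writes into DMA-coherent memory rather than malloc():

#include <stdint.h>
#include <stdlib.h>

struct chunk_sketch  { uint64_t src, dst; uint32_t size; };
struct hwdesc_sketch { uint32_t sar, dar, tcr; };

/* Fill one hardware descriptor per chunk; returns NULL on allocation failure. */
static struct hwdesc_sketch *fill_hwdescs(const struct chunk_sketch *chunks,
					  unsigned int nchunks,
					  unsigned int xfer_shift)
{
	struct hwdesc_sketch *hw = calloc(nchunks, sizeof(*hw));

	if (!hw)
		return NULL;

	for (unsigned int i = 0; i < nchunks; i++) {
		hw[i].sar = (uint32_t)chunks[i].src;      /* low 32 bits */
		hw[i].dar = (uint32_t)chunks[i].dst;
		hw[i].tcr = chunks[i].size >> xfer_shift; /* in transfer units */
	}
	return hw;
}

int main(void)
{
	struct chunk_sketch c[] = { { 0x40000000, 0xe6700000, 4096 } };
	struct hwdesc_sketch *hw = fill_hwdescs(c, 1, 2);

	return hw ? 0 : 1;
}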
781 /* -----------------------------------------------------------------------------
800 dev_err(chan->chan.device->dev, "CHCR DE check error\n"); in rcar_dmac_chcr_de_barrier()
831 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_chan_reinit()
833 /* Move all non-free descriptors to the local lists. */ in rcar_dmac_chan_reinit()
834 list_splice_init(&chan->desc.pending, &descs); in rcar_dmac_chan_reinit()
835 list_splice_init(&chan->desc.active, &descs); in rcar_dmac_chan_reinit()
836 list_splice_init(&chan->desc.done, &descs); in rcar_dmac_chan_reinit()
837 list_splice_init(&chan->desc.wait, &descs); in rcar_dmac_chan_reinit()
839 chan->desc.running = NULL; in rcar_dmac_chan_reinit()
841 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_chan_reinit()
844 list_del(&desc->node); in rcar_dmac_chan_reinit()
857 spin_lock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
859 spin_unlock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
868 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_pause()
870 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_pause()
875 /* -----------------------------------------------------------------------------
892 switch (desc->direction) { in rcar_dmac_chan_configure_desc()
896 xfer_size = chan->src.xfer_size; in rcar_dmac_chan_configure_desc()
902 xfer_size = chan->dst.xfer_size; in rcar_dmac_chan_configure_desc()
913 desc->xfer_shift = ilog2(xfer_size); in rcar_dmac_chan_configure_desc()
914 desc->chcr = chcr | chcr_ts[desc->xfer_shift]; in rcar_dmac_chan_configure_desc()
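rcar_dmac_chan_configure_desc() derives the transfer shift from the configured slave access width (xfer_shift = ilog2(xfer_size)) and ORs the matching transfer-size field into CHCR through a table indexed by that shift. The same derivation in isolation; the chcr_ts values below are placeholders, not the real hardware encoding:

#include <stdint.h>
#include <stdio.h>

/* Placeholder TS encodings indexed by log2(access size in bytes). */
static const uint32_t chcr_ts_sketch[] = { 0x0, 0x1, 0x2, 0x3, 0x4 };

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t xfer_size = 4;                        /* bytes per device access */
	unsigned int xfer_shift = ilog2_u32(xfer_size);
	uint32_t chcr = 0x1000 /* assumed base CHCR bits */ | chcr_ts_sketch[xfer_shift];

	printf("xfer_shift=%u chcr=0x%x\n", xfer_shift, chcr);
	return 0;
}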
918 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
921 * converted to scatter-gather to guarantee consistent locking and a correct
950 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
951 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
953 desc->cyclic = cyclic; in rcar_dmac_chan_prep_sg()
954 desc->direction = dir; in rcar_dmac_chan_prep_sg()
958 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; in rcar_dmac_chan_prep_sg()
988 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
989 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; in rcar_dmac_chan_prep_sg()
992 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
993 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; in rcar_dmac_chan_prep_sg()
1005 chunk->src_addr = dev_addr; in rcar_dmac_chan_prep_sg()
1006 chunk->dst_addr = mem_addr; in rcar_dmac_chan_prep_sg()
1008 chunk->src_addr = mem_addr; in rcar_dmac_chan_prep_sg()
1009 chunk->dst_addr = dev_addr; in rcar_dmac_chan_prep_sg()
1012 chunk->size = size; in rcar_dmac_chan_prep_sg()
1014 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_prep_sg()
1015 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", in rcar_dmac_chan_prep_sg()
1016 chan->index, chunk, desc, i, sg, size, len, in rcar_dmac_chan_prep_sg()
1017 &chunk->src_addr, &chunk->dst_addr); in rcar_dmac_chan_prep_sg()
1023 len -= size; in rcar_dmac_chan_prep_sg()
1025 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
1030 desc->nchunks = nchunks; in rcar_dmac_chan_prep_sg()
1031 desc->size = full_size; in rcar_dmac_chan_prep_sg()
1041 desc->hwdescs.use = !cross_boundary && nchunks > 1; in rcar_dmac_chan_prep_sg()
1042 if (desc->hwdescs.use) { in rcar_dmac_chan_prep_sg()
1044 desc->hwdescs.use = false; in rcar_dmac_chan_prep_sg()
1047 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
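Within rcar_dmac_chan_prep_sg(), every scatterlist entry is cut into chunks no larger than RCAR_DMATCR_MASK << xfer_shift, and a chunk is further shortened so that neither its device nor its memory address range crosses a 4 GiB boundary, since the high address halves are only programmed once in rcar_dmac_chan_start_xfer(). The clamping logic can be exercised on its own; the addresses and limits below are example values:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_POW2(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Clamp a chunk so it fits the TCR limit and crosses no 4 GiB boundary. */
static uint64_t clamp_chunk(uint64_t dev_addr, uint64_t mem_addr,
			    uint64_t len, uint64_t max_chunk_size)
{
	uint64_t size = len < max_chunk_size ? len : max_chunk_size;

	if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
		size = ALIGN_POW2(dev_addr, 1ULL << 32) - dev_addr;
	if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
		size = ALIGN_POW2(mem_addr, 1ULL << 32) - mem_addr;
	return size;
}

int main(void)
{
	/* Assumed example: a memory buffer straddling the 4 GiB line. */
	uint64_t size = clamp_chunk(0xe6700000, 0xfffff000ULL, 0x4000, 1 << 24);

	printf("first chunk size: 0x%llx\n", (unsigned long long)size);
	return 0;
}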
1050 /* -----------------------------------------------------------------------------
1059 INIT_LIST_HEAD(&rchan->desc.chunks_free); in rcar_dmac_alloc_chan_resources()
1060 INIT_LIST_HEAD(&rchan->desc.pages); in rcar_dmac_alloc_chan_resources()
1065 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1069 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1071 return pm_runtime_get_sync(chan->device->dev); in rcar_dmac_alloc_chan_resources()
1077 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_free_chan_resources()
1078 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_free_chan_resources()
1084 spin_lock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1086 spin_unlock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1090 * running. Wait for it to finish before freeing resources. in rcar_dmac_free_chan_resources()
1092 synchronize_irq(rchan->irq); in rcar_dmac_free_chan_resources()
1094 if (rchan->mid_rid >= 0) { in rcar_dmac_free_chan_resources()
1096 clear_bit(rchan->mid_rid, dmac->modules); in rcar_dmac_free_chan_resources()
1097 rchan->mid_rid = -EINVAL; in rcar_dmac_free_chan_resources()
1100 list_splice_init(&rchan->desc.free, &list); in rcar_dmac_free_chan_resources()
1101 list_splice_init(&rchan->desc.pending, &list); in rcar_dmac_free_chan_resources()
1102 list_splice_init(&rchan->desc.active, &list); in rcar_dmac_free_chan_resources()
1103 list_splice_init(&rchan->desc.done, &list); in rcar_dmac_free_chan_resources()
1104 list_splice_init(&rchan->desc.wait, &list); in rcar_dmac_free_chan_resources()
1106 rchan->desc.running = NULL; in rcar_dmac_free_chan_resources()
1111 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { in rcar_dmac_free_chan_resources()
1112 list_del(&page->node); in rcar_dmac_free_chan_resources()
1117 if (map->slave.xfer_size) { in rcar_dmac_free_chan_resources()
1118 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_free_chan_resources()
1119 map->slave.xfer_size, map->dir, 0); in rcar_dmac_free_chan_resources()
1120 map->slave.xfer_size = 0; in rcar_dmac_free_chan_resources()
1123 pm_runtime_put(chan->device->dev); in rcar_dmac_free_chan_resources()
1150 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_map_slave_addr()
1156 dev_addr = rchan->src.slave_addr; in rcar_dmac_map_slave_addr()
1157 dev_size = rchan->src.xfer_size; in rcar_dmac_map_slave_addr()
1160 dev_addr = rchan->dst.slave_addr; in rcar_dmac_map_slave_addr()
1161 dev_size = rchan->dst.xfer_size; in rcar_dmac_map_slave_addr()
1166 if (dev_addr == map->slave.slave_addr && in rcar_dmac_map_slave_addr()
1167 dev_size == map->slave.xfer_size && in rcar_dmac_map_slave_addr()
1168 dev_dir == map->dir) in rcar_dmac_map_slave_addr()
1172 if (map->slave.xfer_size) in rcar_dmac_map_slave_addr()
1173 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_map_slave_addr()
1174 map->slave.xfer_size, map->dir, 0); in rcar_dmac_map_slave_addr()
1175 map->slave.xfer_size = 0; in rcar_dmac_map_slave_addr()
1178 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, in rcar_dmac_map_slave_addr()
1181 if (dma_mapping_error(chan->device->dev, map->addr)) { in rcar_dmac_map_slave_addr()
1182 dev_err(chan->device->dev, in rcar_dmac_map_slave_addr()
1183 "chan%u: failed to map %zx@%pap", rchan->index, in rcar_dmac_map_slave_addr()
1185 return -EIO; in rcar_dmac_map_slave_addr()
1188 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", in rcar_dmac_map_slave_addr()
1189 rchan->index, dev_size, &dev_addr, &map->addr, in rcar_dmac_map_slave_addr()
1192 map->slave.slave_addr = dev_addr; in rcar_dmac_map_slave_addr()
1193 map->slave.xfer_size = dev_size; in rcar_dmac_map_slave_addr()
1194 map->dir = dev_dir; in rcar_dmac_map_slave_addr()
1207 if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { in rcar_dmac_prep_slave_sg()
1208 dev_warn(chan->device->dev, in rcar_dmac_prep_slave_sg()
1210 __func__, sg_len, rchan->mid_rid); in rcar_dmac_prep_slave_sg()
1217 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_slave_sg()
1235 if (rchan->mid_rid < 0 || buf_len < period_len) { in rcar_dmac_prep_dma_cyclic()
1236 dev_warn(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1238 __func__, buf_len, period_len, rchan->mid_rid); in rcar_dmac_prep_dma_cyclic()
1247 dev_err(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1249 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); in rcar_dmac_prep_dma_cyclic()
1272 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_dma_cyclic()
1288 rchan->src.slave_addr = cfg->src_addr; in rcar_dmac_device_config()
1289 rchan->dst.slave_addr = cfg->dst_addr; in rcar_dmac_device_config()
1290 rchan->src.xfer_size = cfg->src_addr_width; in rcar_dmac_device_config()
1291 rchan->dst.xfer_size = cfg->dst_addr_width; in rcar_dmac_device_config()
1301 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1303 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1307 * be running. in rcar_dmac_chan_terminate_all()
1318 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_get_residue()
1319 struct rcar_dmac_xfer_chunk *running = NULL; in rcar_dmac_chan_get_residue() local
1337 status = dma_cookie_status(&chan->chan, cookie, NULL); in rcar_dmac_chan_get_residue()
1342 * If the cookie doesn't correspond to the currently running transfer in rcar_dmac_chan_get_residue()
1346 * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running" in rcar_dmac_chan_get_residue()
1351 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
1352 list_for_each_entry(desc, &chan->desc.done, node) { in rcar_dmac_chan_get_residue()
1353 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1356 list_for_each_entry(desc, &chan->desc.pending, node) { in rcar_dmac_chan_get_residue()
1357 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1358 return desc->size; in rcar_dmac_chan_get_residue()
1360 list_for_each_entry(desc, &chan->desc.active, node) { in rcar_dmac_chan_get_residue()
1361 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1362 return desc->size; in rcar_dmac_chan_get_residue()
1393 * In descriptor mode the descriptor running pointer is not maintained in rcar_dmac_chan_get_residue()
1394 * by the interrupt handler, find the running descriptor from the in rcar_dmac_chan_get_residue()
1395 * descriptor pointer field in the CHCRB register. In non-descriptor in rcar_dmac_chan_get_residue()
1396 * mode just use the running descriptor pointer. in rcar_dmac_chan_get_residue()
1398 if (desc->hwdescs.use) { in rcar_dmac_chan_get_residue()
1401 dptr = desc->nchunks; in rcar_dmac_chan_get_residue()
1402 dptr--; in rcar_dmac_chan_get_residue()
1403 WARN_ON(dptr >= desc->nchunks); in rcar_dmac_chan_get_residue()
1405 running = desc->running; in rcar_dmac_chan_get_residue()
1409 list_for_each_entry_reverse(chunk, &desc->chunks, node) { in rcar_dmac_chan_get_residue()
1410 if (chunk == running || ++dptr == desc->nchunks) in rcar_dmac_chan_get_residue()
1413 residue += chunk->size; in rcar_dmac_chan_get_residue()
1417 residue += tcrb << desc->xfer_shift; in rcar_dmac_chan_get_residue()
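The residue is the byte count still owed to the caller: the live transfer count (TCRB) of the chunk in flight converted back to bytes with the transfer shift, plus the sizes of all chunks that have not started yet. In descriptor mode the in-flight chunk is located through the DPTR field of CHCRB; otherwise the driver's own running pointer is used. A simplified model that assumes the in-flight index is already known:

#include <stdint.h>
#include <stdio.h>

/* Residue = bytes left in the in-flight chunk + all chunks after it. */
static uint64_t residue_sketch(const uint32_t *chunk_sizes, unsigned int nchunks,
			       unsigned int running_idx, uint32_t tcrb,
			       unsigned int xfer_shift)
{
	uint64_t residue = (uint64_t)tcrb << xfer_shift;

	for (unsigned int i = running_idx + 1; i < nchunks; i++)
		residue += chunk_sizes[i];
	return residue;
}

int main(void)
{
	uint32_t chunks[] = { 4096, 4096, 2048 };

	/* Chunk 1 in flight, 256 transfer units of 4 bytes still pending. */
	printf("residue: %llu bytes\n",
	       (unsigned long long)residue_sketch(chunks, 3, 1, 256, 2));
	return 0;
}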
1436 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_tx_status()
1438 cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; in rcar_dmac_tx_status()
1439 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_tx_status()
1455 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_issue_pending()
1457 if (list_empty(&rchan->desc.pending)) in rcar_dmac_issue_pending()
1461 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); in rcar_dmac_issue_pending()
1464 * If no transfer is running pick the first descriptor from the active in rcar_dmac_issue_pending()
1467 if (!rchan->desc.running) { in rcar_dmac_issue_pending()
1470 desc = list_first_entry(&rchan->desc.active, in rcar_dmac_issue_pending()
1472 rchan->desc.running = desc; in rcar_dmac_issue_pending()
1478 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_issue_pending()
1485 synchronize_irq(rchan->irq); in rcar_dmac_device_synchronize()
1488 /* -----------------------------------------------------------------------------
1494 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_desc_stage_end()
1497 if (WARN_ON(!desc || !desc->cyclic)) { in rcar_dmac_isr_desc_stage_end()
1499 * This should never happen, there should always be a running in rcar_dmac_isr_desc_stage_end()
1516 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_transfer_end()
1521 * This should never happen, there should always be a running in rcar_dmac_isr_transfer_end()
1530 * descriptor mode. Only update the running chunk pointer in in rcar_dmac_isr_transfer_end()
1531 * non-descriptor mode. in rcar_dmac_isr_transfer_end()
1533 if (!desc->hwdescs.use) { in rcar_dmac_isr_transfer_end()
1539 if (!list_is_last(&desc->running->node, &desc->chunks)) { in rcar_dmac_isr_transfer_end()
1540 desc->running = list_next_entry(desc->running, node); in rcar_dmac_isr_transfer_end()
1541 if (!desc->cyclic) in rcar_dmac_isr_transfer_end()
1550 if (desc->cyclic) { in rcar_dmac_isr_transfer_end()
1551 desc->running = in rcar_dmac_isr_transfer_end()
1552 list_first_entry(&desc->chunks, in rcar_dmac_isr_transfer_end()
1560 list_move_tail(&desc->node, &chan->desc.done); in rcar_dmac_isr_transfer_end()
1563 if (!list_empty(&chan->desc.active)) in rcar_dmac_isr_transfer_end()
1564 chan->desc.running = list_first_entry(&chan->desc.active, in rcar_dmac_isr_transfer_end()
1568 chan->desc.running = NULL; in rcar_dmac_isr_transfer_end()
1571 if (chan->desc.running) in rcar_dmac_isr_transfer_end()
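On a transfer-end interrupt in non-descriptor mode the running chunk pointer advances to the next chunk, and for a cyclic descriptor it wraps back to the first chunk instead of completing, which is what keeps ring-buffer style transfers going indefinitely. The index arithmetic, reduced to a sketch:

#include <stdbool.h>
#include <stdio.h>

/* Returns the next chunk index, or -1 when a non-cyclic transfer is complete. */
static int next_chunk_sketch(int running, int nchunks, bool cyclic)
{
	if (running + 1 < nchunks)
		return running + 1;          /* more chunks queued */
	return cyclic ? 0 : -1;              /* wrap around, or done */
}

int main(void)
{
	printf("%d\n", next_chunk_sketch(2, 3, true));   /* cyclic: wraps to 0 */
	printf("%d\n", next_chunk_sketch(2, 3, false));  /* one-shot: complete */
	return 0;
}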
1585 spin_lock(&chan->lock); in rcar_dmac_isr_channel()
1589 struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); in rcar_dmac_isr_channel()
1615 spin_unlock(&chan->lock); in rcar_dmac_isr_channel()
1618 dev_err(chan->chan.device->dev, "Channel Address Error\n"); in rcar_dmac_isr_channel()
1633 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1636 if (chan->desc.running && chan->desc.running->cyclic) { in rcar_dmac_isr_channel_thread()
1637 desc = chan->desc.running; in rcar_dmac_isr_channel_thread()
1638 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1641 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1643 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1651 while (!list_empty(&chan->desc.done)) { in rcar_dmac_isr_channel_thread()
1652 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, in rcar_dmac_isr_channel_thread()
1654 dma_cookie_complete(&desc->async_tx); in rcar_dmac_isr_channel_thread()
1655 list_del(&desc->node); in rcar_dmac_isr_channel_thread()
1657 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1659 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1666 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1669 list_add_tail(&desc->node, &chan->desc.wait); in rcar_dmac_isr_channel_thread()
1672 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1680 /* -----------------------------------------------------------------------------
1686 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_chan_filter()
1696 if (chan->device->device_config != rcar_dmac_device_config) in rcar_dmac_chan_filter()
1699 return !test_and_set_bit(dma_spec->args[0], dmac->modules); in rcar_dmac_chan_filter()
1709 if (dma_spec->args_count != 1) in rcar_dmac_of_xlate()
1717 ofdma->of_node); in rcar_dmac_of_xlate()
1722 rchan->mid_rid = dma_spec->args[0]; in rcar_dmac_of_xlate()
1727 /* -----------------------------------------------------------------------------
1748 * - Wait for the current transfer to complete and stop the device,
1749 * - Resume transfers, if any.
1757 /* -----------------------------------------------------------------------------
1764 struct platform_device *pdev = to_platform_device(dmac->dev); in rcar_dmac_chan_probe()
1765 struct dma_chan *chan = &rchan->chan; in rcar_dmac_chan_probe()
1770 rchan->mid_rid = -EINVAL; in rcar_dmac_chan_probe()
1772 spin_lock_init(&rchan->lock); in rcar_dmac_chan_probe()
1774 INIT_LIST_HEAD(&rchan->desc.free); in rcar_dmac_chan_probe()
1775 INIT_LIST_HEAD(&rchan->desc.pending); in rcar_dmac_chan_probe()
1776 INIT_LIST_HEAD(&rchan->desc.active); in rcar_dmac_chan_probe()
1777 INIT_LIST_HEAD(&rchan->desc.done); in rcar_dmac_chan_probe()
1778 INIT_LIST_HEAD(&rchan->desc.wait); in rcar_dmac_chan_probe()
1781 sprintf(pdev_irqname, "ch%u", rchan->index); in rcar_dmac_chan_probe()
1782 rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); in rcar_dmac_chan_probe()
1783 if (rchan->irq < 0) in rcar_dmac_chan_probe()
1784 return -ENODEV; in rcar_dmac_chan_probe()
1786 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", in rcar_dmac_chan_probe()
1787 dev_name(dmac->dev), rchan->index); in rcar_dmac_chan_probe()
1789 return -ENOMEM; in rcar_dmac_chan_probe()
1795 chan->device = &dmac->engine; in rcar_dmac_chan_probe()
1798 list_add_tail(&chan->device_node, &dmac->engine.channels); in rcar_dmac_chan_probe()
1800 ret = devm_request_threaded_irq(dmac->dev, rchan->irq, in rcar_dmac_chan_probe()
1805 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", in rcar_dmac_chan_probe()
1806 rchan->irq, ret); in rcar_dmac_chan_probe()
1817 struct device_node *np = dev->of_node; in rcar_dmac_parse_of()
1820 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); in rcar_dmac_parse_of()
1822 dev_err(dev, "unable to read dma-channels property\n"); in rcar_dmac_parse_of()
1827 if (dmac->n_channels <= 0 || in rcar_dmac_parse_of()
1828 dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { in rcar_dmac_parse_of()
1830 dmac->n_channels); in rcar_dmac_parse_of()
1831 return -EINVAL; in rcar_dmac_parse_of()
1835 * If the driver is unable to read dma-channel-mask property, in rcar_dmac_parse_of()
1838 dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); in rcar_dmac_parse_of()
1839 of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); in rcar_dmac_parse_of()
1841 /* If the property has out-of-channel mask, this driver clears it */ in rcar_dmac_parse_of()
1842 dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); in rcar_dmac_parse_of()
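rcar_dmac_parse_of() defaults channels_mask to GENMASK(n_channels - 1, 0), lets an optional dma-channel-mask property override it, and finally clears any bits beyond the channel count. The same bit manipulation in plain C, with an assumed property value:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)  ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))

int main(void)
{
	unsigned int n_channels = 16;
	uint32_t channels_mask = GENMASK32(n_channels - 1, 0);  /* default: all */
	uint32_t dt_mask = 0x3fffe;             /* assumed dma-channel-mask value */

	channels_mask = dt_mask;                        /* optional DT override */
	channels_mask &= GENMASK32(n_channels - 1, 0);  /* drop out-of-range bits */

	printf("channels_mask = 0x%04x\n", channels_mask);
	return 0;
}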
1861 data = of_device_get_match_data(&pdev->dev); in rcar_dmac_probe()
1863 return -EINVAL; in rcar_dmac_probe()
1865 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); in rcar_dmac_probe()
1867 return -ENOMEM; in rcar_dmac_probe()
1869 dmac->dev = &pdev->dev; in rcar_dmac_probe()
1871 dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); in rcar_dmac_probe()
1873 ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); in rcar_dmac_probe()
1877 ret = rcar_dmac_parse_of(&pdev->dev, dmac); in rcar_dmac_probe()
1889 if (device_iommu_mapped(&pdev->dev)) in rcar_dmac_probe()
1890 dmac->channels_mask &= ~BIT(0); in rcar_dmac_probe()
1892 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, in rcar_dmac_probe()
1893 sizeof(*dmac->channels), GFP_KERNEL); in rcar_dmac_probe()
1894 if (!dmac->channels) in rcar_dmac_probe()
1895 return -ENOMEM; in rcar_dmac_probe()
1898 dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0); in rcar_dmac_probe()
1899 if (IS_ERR(dmac->dmac_base)) in rcar_dmac_probe()
1900 return PTR_ERR(dmac->dmac_base); in rcar_dmac_probe()
1902 if (!data->chan_offset_base) { in rcar_dmac_probe()
1903 dmac->chan_base = devm_platform_ioremap_resource(pdev, 1); in rcar_dmac_probe()
1904 if (IS_ERR(dmac->chan_base)) in rcar_dmac_probe()
1905 return PTR_ERR(dmac->chan_base); in rcar_dmac_probe()
1907 chan_base = dmac->chan_base; in rcar_dmac_probe()
1909 chan_base = dmac->dmac_base + data->chan_offset_base; in rcar_dmac_probe()
1913 chan->index = i; in rcar_dmac_probe()
1914 chan->iomem = chan_base + i * data->chan_offset_stride; in rcar_dmac_probe()
1918 pm_runtime_enable(&pdev->dev); in rcar_dmac_probe()
1919 ret = pm_runtime_resume_and_get(&pdev->dev); in rcar_dmac_probe()
1921 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); in rcar_dmac_probe()
1926 pm_runtime_put(&pdev->dev); in rcar_dmac_probe()
1929 dev_err(&pdev->dev, "failed to reset device\n"); in rcar_dmac_probe()
1934 engine = &dmac->engine; in rcar_dmac_probe()
1936 dma_cap_set(DMA_MEMCPY, engine->cap_mask); in rcar_dmac_probe()
1937 dma_cap_set(DMA_SLAVE, engine->cap_mask); in rcar_dmac_probe()
1939 engine->dev = &pdev->dev; in rcar_dmac_probe()
1940 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); in rcar_dmac_probe()
1942 engine->src_addr_widths = widths; in rcar_dmac_probe()
1943 engine->dst_addr_widths = widths; in rcar_dmac_probe()
1944 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); in rcar_dmac_probe()
1945 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in rcar_dmac_probe()
1947 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; in rcar_dmac_probe()
1948 engine->device_free_chan_resources = rcar_dmac_free_chan_resources; in rcar_dmac_probe()
1949 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; in rcar_dmac_probe()
1950 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; in rcar_dmac_probe()
1951 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; in rcar_dmac_probe()
1952 engine->device_config = rcar_dmac_device_config; in rcar_dmac_probe()
1953 engine->device_pause = rcar_dmac_chan_pause; in rcar_dmac_probe()
1954 engine->device_terminate_all = rcar_dmac_chan_terminate_all; in rcar_dmac_probe()
1955 engine->device_tx_status = rcar_dmac_tx_status; in rcar_dmac_probe()
1956 engine->device_issue_pending = rcar_dmac_issue_pending; in rcar_dmac_probe()
1957 engine->device_synchronize = rcar_dmac_device_synchronize; in rcar_dmac_probe()
1959 INIT_LIST_HEAD(&engine->channels); in rcar_dmac_probe()
1968 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, in rcar_dmac_probe()
1976 * Default transfer size of 32 bytes requires 32-byte alignment. in rcar_dmac_probe()
1985 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_probe()
1987 pm_runtime_disable(&pdev->dev); in rcar_dmac_probe()
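In probe, each channel's register window is computed as chan_base + index * chan_offset_stride, where chan_base is either a second MMIO resource or an offset into the main register block depending on the SoC generation. A trivial illustration of that addressing; the base and stride values are assumptions, not the hardware map:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t chan_base = 0x8000;        /* assumed start of the channel window */
	unsigned int stride = 0x80;          /* assumed per-channel register stride */

	for (unsigned int i = 0; i < 4; i++)
		printf("chan%u regs at offset 0x%lx\n",
		       i, (unsigned long)(chan_base + i * stride));
	return 0;
}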
1995 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_remove()
1996 dma_async_device_unregister(&dmac->engine); in rcar_dmac_remove()
1998 pm_runtime_disable(&pdev->dev); in rcar_dmac_remove()
2020 .compatible = "renesas,rcar-dmac",
2023 .compatible = "renesas,rcar-gen4-dmac",
2026 .compatible = "renesas,dmac-r8a779a0",
2036 .name = "rcar-dmac",
2046 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");