// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 * ...
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 * ...
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 */

#include <linux/dma-mapping.h>
/* ... */
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * ...
 * @node: link used for putting this into a channel queue
 */
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * ...
 * @dma_cfg: channel number, direction
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel-specific tasklet used for callbacks
 * ...
 * @flags: flags for the channel
 * ...
 * @runtime_ctrl: M2M runtime values for the control register.
 */
/* Channel is configured for cyclic transfers */
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * ...
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to the current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * ...
 * The hw_* methods perform the operations that are different on M2M and M2P
 * channels. These methods are called with the channel lock held and
 * interrupts disabled, so they must not sleep.
 */
/* chan2dev() */
	return &edmac->chan.dev->device;
/* ep93xx_dma_chan_is_m2p() */
	if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
		return true;

	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
/**
 * ep93xx_dma_chan_direction - returns the direction the channel can be used in
 * ...
 * This function can be used in filter functions to find out whether the
 * channel supports the given DMA direction. Only M2P channels have such a
 * limitation; for M2M channels the direction is configurable.
 */
	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
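/*
 * Illustration (added, not from the original source): with the rule above the
 * M2P channels pair up as TX/RX per peripheral:
 *   chan_id 0 -> DMA_MEM_TO_DEV (TX),  chan_id 1 -> DMA_DEV_TO_MEM (RX)
 *   chan_id 2 -> DMA_MEM_TO_DEV (TX),  chan_id 3 -> DMA_DEV_TO_MEM (RX)
 * and so on for the remaining even/odd pairs.
 */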
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * ...
 * Called with @edmac->lock held and interrupts disabled.
 */
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into the @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * Copy the callback info from the head descriptor to each
		 * chained descriptor, so the callback can be invoked without
		 * walking back to the head of the chain.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
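	/*
	 * Sketch (added, for illustration): a transaction prepared as head
	 * descriptor "first" with first->tx_list = {d1, d2} ends up as
	 * edmac->active = {first, d1, d2}, with d1 and d2 carrying first's
	 * callback and callback_param.
	 */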
/* ep93xx_dma_get_active() - called with @edmac->lock held and interrupts disabled */
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in the @edmac->active list
 * and returns %true if there are still descriptors left in the chain to
 * process. When the channel is in cyclic mode this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;
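	/*
	 * Note (added explanation): only the head descriptor of a chain
	 * carries a valid txd.cookie, so seeing a cookie again after the
	 * rotation above means the chain has wrapped around and is done.
	 */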
	/* ... */
	return !desc->txd.cookie;
static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx documentation requires a dummy read after every
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
/* m2p_hw_setup() */
	u32 control;

	writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
/* m2p_channel_state() */
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
/* m2p_hw_synchronize() */
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);
	/* ... wait for the channel to actually stop ... */
/* m2p_fill_desc() */
	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
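	/*
	 * Note (added explanation): the M2P engine is double-buffered. While
	 * the hardware drains one MAXCNTx/BASEx register pair, software
	 * programs the other; @edmac->buffer tracks which pair to fill next.
	 */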
/* m2p_hw_submit() */
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
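	/*
	 * Note (added explanation): with a single buffer queued only the
	 * stall interrupt (STALLINT) is enabled; once both hardware buffers
	 * are in use the next-frame-buffer interrupt (NFBINT) is enabled too,
	 * so a drained buffer can be refilled while the other one is active.
	 */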
/* m2p_hw_interrupt() */
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;
	/* ... */
	/* Clear the error interrupt */
	writel(1, edmac->regs + M2P_INTERRUPT);
	/* ... report the channel error ... */
		desc->txd.cookie, desc->src_addr, desc->dst_addr,
		desc->size);
	/* ... */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
/* m2m_hw_setup() */
	u32 control = 0;

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (edmac->dma_cfg.port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/* ... */
		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			/* ... */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
/* m2m_hw_shutdown() */
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
/* m2m_fill_desc() */
	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
/* m2m_hw_submit() */
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since the PW (peripheral width) is configured by the client, always
	 * clear the PW bits first and then set them from the runtime
	 * configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done only after the transfer parameters have been programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the transfer.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
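	/*
	 * Note (added explanation): starting an M2M transfer thus takes two
	 * control writes - one to enable the channel, and for memcpy channels
	 * a second one to assert the software trigger (START) that actually
	 * begins the transfer.
	 */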
/*
 * The DONE interrupt is supposed to fire once all M2M transactions have
 * completed, but this is not always the case - sometimes the EP93xx M2M DMA
 * asserts DONE while the DMA channel is still running (channel Buffer FSM in
 * DMA_BUF_ON state, and channel Control FSM in DMA_MEM_RD state, observed at
 * least in IDE-DMA operation). In effect, disabling the channel when only the
 * DONE bit is set could stop a transfer that is still in flight. To avoid
 * this, the handler uses the Buffer FSM and Control FSM to check the current
 * state of the DMA channel.
 */
/* m2m_hw_interrupt() */
	u32 status = readl(edmac->regs + M2M_STATUS);
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	/* Clear the interrupt */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines the action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt register
	 * is not reliable.
	 */
	/* ... */
	/* ... disabling the channel or polling the DONE bit. */
	/* ... */
	if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* Software trigger for memcpy channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
	/* ... */
	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF state
	 * and the Control FSM is in DMA_STALL state.
	 */
	/* ... */
	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
		   | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);
	/* ... */
/* ep93xx_dma_desc_get() */
	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	/* ... */
/* ep93xx_dma_desc_put() */
	spin_lock_irqsave(&edmac->lock, flags);
	list_splice_init(&desc->tx_list, &edmac->free_list);
	list_add(&desc->node, &edmac->free_list);
	spin_unlock_irqrestore(&edmac->lock, flags);
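/*
 * Note (added explanation): descriptors are recycled rather than freed. They
 * return to @edmac->free_list here and are handed out again by
 * ep93xx_dma_desc_get() once the client has acked them (async_tx_test_ack()).
 */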
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
/* ep93xx_dma_tasklet() */
	spin_lock_irq(&edmac->lock);
	/* ... */
	if (desc->complete) {
		/* Mark the descriptor complete for the non-cyclic case only */
		if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			dma_cookie_complete(&desc->txd);
		list_splice_init(&edmac->active, &list);
	}
	dmaengine_desc_get_callback(&desc->txd, &cb);

	spin_unlock_irq(&edmac->lock);
	/* ... */
	dma_descriptor_unmap(&desc->txd);
/* ep93xx_dma_interrupt() */
	spin_lock(&edmac->lock);
	/* ... */
	if (!desc) {
		/* ... warn: interrupt while the active list is empty ... */
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;
	/* ... */
	}

	spin_unlock(&edmac->lock);
	return ret;
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * ...
 */
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	/* ... */
	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	/* ... */
	/* Push directly to the hardware if idle, otherwise queue it up. */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);

	return cookie;
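/*
 * Typical consumer-side flow (added illustration, not part of this driver;
 * chan, sgl and nents are hypothetical):
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);     (ends up in ep93xx_dma_tx_submit())
 *	dma_async_issue_pending(chan);      (kicks ep93xx_dma_issue_pending())
 */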
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources for
 *
 * Allocates the necessary resources for the given DMA channel and returns the
 * number of allocated descriptors for the channel. A negative errno is
 * returned in case of failure.
 */
	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 ||
		    edmac->dma_cfg.port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
			switch (edmac->dma_cfg.port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(edmac->dma_cfg.dir))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}
	ret = clk_prepare_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	/* ... */
	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);
	/* ... allocate and initialize the descriptor pool ... */
		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;
	/* ... error unwind ... */
	free_irq(edmac->irq, edmac);
	/* ... */
	clk_disable_unprepare(edmac->clk);
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Releases all the resources allocated for the given channel. The channel
 * must be idle when this is called.
 */
	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);
	/* ... */
	clk_disable_unprepare(edmac->clk);
	free_irq(edmac->irq, edmac);
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * ...
 */
	for (offset = 0; offset < len; offset += bytes) {
		/* ... */
		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
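	/*
	 * Worked example (added; assumes DMA_MAX_CHAN_BYTES == 0xffff): a
	 * 150000 byte memcpy is split into three chained descriptors of
	 * 65535, 65535 and 18930 bytes which execute back to back.
	 */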
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * ...
 */
	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);
		/* ... */
		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * ...
 * The transfer terminates only when the client calls
 * dmaengine_terminate_all() for this channel.
 */
	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}
	/* ... */
	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	for (offset = 0; offset < buf_len; offset += period_len) {
		/* ... */
		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;
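	/*
	 * Worked example (added): mapping a 32 KiB ring buffer with
	 * @period_len = 4096 yields eight chained descriptors; the completion
	 * callback then fires once per 4096 byte period until the client
	 * calls dmaengine_terminate_all().
	 */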
/**
 * ep93xx_dma_synchronize - synchronizes the termination of transfers to the
 *			    current context
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns, it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them.
 */
	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
/* ep93xx_dma_slave_config() */
	memcpy(&edmac->slave_config, config, sizeof(*config));
/* ep93xx_dma_slave_config_write() */
	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	/* ... translate the bus width into M2M_CONTROL_PW_* bits ... */
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
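/*
 * Usage sketch (added illustration; the address value is hypothetical):
 * configuring an M2M channel for 16-bit writes to a device data register
 * before a DMA_MEM_TO_DEV transfer:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_data_reg_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */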
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * ...
 */
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 * ...
 */
/* ep93xx_dma_of_probe() */
	struct device *dev = &pdev->dev;
	/* ... */
	if (!data)
		return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));

	edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
			    GFP_KERNEL);
	if (!edma)
		return ERR_PTR(-ENOMEM);

	edma->m2m = data->id;
	edma->num_channels = data->num_channels;
	dma_dev = &edma->dma_dev;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < edma->num_channels; i++) {
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(edmac->regs))
			return ERR_CAST(edmac->regs);

		edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
		if (edmac->irq < 0)
			return ERR_PTR(edmac->irq);

		edmac->edma = edma;

		if (edma->m2m)
			dma_clk_name = devm_kasprintf(dev, GFP_KERNEL, "m2m%u", i);
		else
			dma_clk_name = devm_kasprintf(dev, GFP_KERNEL, "m2p%u", i);
		if (!dma_clk_name)
			return ERR_PTR(-ENOBUFS);

		edmac->clk = devm_clk_get(dev, dma_clk_name);
		if (IS_ERR(edmac->clk)) {
			dev_err_probe(dev, PTR_ERR(edmac->clk),
				      ...);
			return ERR_CAST(edmac->clk);
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}
/* ep93xx_m2p_dma_filter() */
	if (cfg->dir != ep93xx_dma_chan_direction(chan))
		return false;

	echan->dma_cfg = *cfg;
	return true;

/* ep93xx_m2p_dma_of_xlate() */
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	/* ... */
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];
	/* ... validate port and direction ... */
	return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
/* ep93xx_m2m_dma_filter() */
	echan->dma_cfg = *cfg;
	return true;

/* ep93xx_m2m_dma_of_xlate() */
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	/* ... */
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];
	/* ... validate port and direction ... */
	return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
/* ep93xx_dma_probe() */
	dma_dev = &edma->dma_dev;

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}
	if (edma->m2m) {
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
						 edma);
	} else {
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
						 edma);
	}
	/* ... */
	dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");
	{ .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
	{ .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
	/* ... */

	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	/* ... */

	.name = "ep93xx-dma",