Lines Matching +full:memcpy +full:- +full:channels

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Ericsson AB 2007-2008
4 * Copyright (C) ST-Ericsson SA 2008-2010
5 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
9 #include <linux/dma-mapping.h>
32 * struct stedma40_platform_data - Configuration struct for the dma device.
34 * @disabled_channels: A vector, ending with -1, that marks physical channels
36 * @soft_lli_chans: A vector that marks which physical channels will use LLI by SW
40 * @num_of_soft_lli_chans: The number of channels that need to be configured
43 * @num_of_memcpy_chans: The number of channels reserved for memcpy.
44 * @num_of_phy_chans: The number of physical channels implemented in HW.
45 * 0 means the number of channels is read from DMA HW, but this is only valid
46 * when the channel count is a multiple of 4, like 8.
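
The @disabled_channels vector documented above carries no explicit length; it is terminated by -1 instead. A minimal userspace sketch of walking such a vector (array contents illustrative only):

    #include <stdio.h>

    static const int disabled_channels[] = { 11, 12, -1 };

    int main(void)
    {
            /* -1 is the end marker, mirroring the convention above */
            for (int i = 0; disabled_channels[i] != -1; i++)
                    printf("channel %d is disabled\n", disabled_channels[i]);
            return 0;
    }
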
59 #define D40_PHY_CHAN -1
78 /* Max number of logical channels per physical channel */
91 /* Reserved event lines for memcpy only. */
108 /* Default configuration for physical memcpy */
122 /* Default configuration for logical memcpy */
137 * enum d40_command - The different commands and/or statuses.
152 * enum d40_events - The different Event Enables for the event lines.
154 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
256 * struct d40_interrupt_lookup - lookup table for interrupt handler
301 * struct d40_reg_val - simple lookup struct
315 /* Interrupts on all logical channels */
333 /* Interrupts on all logical channels */
352 * struct d40_lli_pool - Structure for keeping LLIs in memory
371 * struct d40_desc - A descriptor is one DMA job.
376 * @lli_log: Same as above but for logical channels.
377 * @lli_pool: The pool with two entries pre-allocated.
408 * struct d40_lcla_pool - LCLA pool settings and data.
413 * This pointer is only there for clean-up on error.
414 * @pages: The number of pages needed for all physical channels.
415 * Only used later for clean-up on error
429 * struct d40_phy_res - struct for handling eventlines mapped to physical
430 * channels.
454 * struct d40_chan - Struct that describes a channel.
514 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
546 * struct d40_base - The big global struct, one for each probed instance.
549 * @execmd_lock: Lock for execute command usage since several channels share
556 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
558 * @num_phy_chans: The number of physical channels. Read from HW. This
559 * is the number of available channels for this driver, not counting "Secure
560 * mode" allocated physical channels.
561 * @num_log_chans: The number of logical channels. Calculated from
563 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
564 * @dma_slave: dma_device channels that can only do slave transfers.
565 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
566 * @phy_chans: Room for all possible physical channels in system.
567 * @log_chans: Room for all possible logical channels in system.
575 * @phy_res: Vector containing all physical channels.
587 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
611 /* Physical half channels */
628 return &d40c->chan.dev->device; in chan2dev()
633 return chan->log_num == D40_PHY_CHAN; in chan_is_physical()
643 return chan->base->virtbase + D40_DREG_PCBASE + in chan_base()
644 chan->phy_chan->num * D40_DREG_PCDELTA; in chan_base()
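
chan_base() above computes each physical channel's register window at a fixed stride from the controller base. A compilable sketch of that address arithmetic; the offset constants here are assumed for illustration, not taken from the driver:

    #define D40_DREG_PCBASE   0x400  /* assumed: start of channel windows */
    #define D40_DREG_PCDELTA  0x20   /* assumed stride: 8 regs x 4 bytes */

    static inline unsigned char *chan_window(unsigned char *virtbase, int num)
    {
            /* one register window per physical channel */
            return virtbase + D40_DREG_PCBASE + num * D40_DREG_PCDELTA;
    }
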
670 base = d40d->lli_pool.pre_alloc_lli; in d40_pool_lli_alloc()
671 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); in d40_pool_lli_alloc()
672 d40d->lli_pool.base = NULL; in d40_pool_lli_alloc()
674 d40d->lli_pool.size = lli_len * 2 * align; in d40_pool_lli_alloc()
676 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); in d40_pool_lli_alloc()
677 d40d->lli_pool.base = base; in d40_pool_lli_alloc()
679 if (d40d->lli_pool.base == NULL) in d40_pool_lli_alloc()
680 return -ENOMEM; in d40_pool_lli_alloc()
684 d40d->lli_log.src = PTR_ALIGN(base, align); in d40_pool_lli_alloc()
685 d40d->lli_log.dst = d40d->lli_log.src + lli_len; in d40_pool_lli_alloc()
687 d40d->lli_pool.dma_addr = 0; in d40_pool_lli_alloc()
689 d40d->lli_phy.src = PTR_ALIGN(base, align); in d40_pool_lli_alloc()
690 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; in d40_pool_lli_alloc()
692 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, in d40_pool_lli_alloc()
693 d40d->lli_phy.src, in d40_pool_lli_alloc()
694 d40d->lli_pool.size, in d40_pool_lli_alloc()
697 if (dma_mapping_error(d40c->base->dev, in d40_pool_lli_alloc()
698 d40d->lli_pool.dma_addr)) { in d40_pool_lli_alloc()
699 kfree(d40d->lli_pool.base); in d40_pool_lli_alloc()
700 d40d->lli_pool.base = NULL; in d40_pool_lli_alloc()
701 d40d->lli_pool.dma_addr = 0; in d40_pool_lli_alloc()
702 return -ENOMEM; in d40_pool_lli_alloc()
711 if (d40d->lli_pool.dma_addr) in d40_pool_lli_free()
712 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, in d40_pool_lli_free()
713 d40d->lli_pool.size, DMA_TO_DEVICE); in d40_pool_lli_free()
715 kfree(d40d->lli_pool.base); in d40_pool_lli_free()
716 d40d->lli_pool.base = NULL; in d40_pool_lli_free()
717 d40d->lli_pool.size = 0; in d40_pool_lli_free()
718 d40d->lli_log.src = NULL; in d40_pool_lli_free()
719 d40d->lli_log.dst = NULL; in d40_pool_lli_free()
720 d40d->lli_phy.src = NULL; in d40_pool_lli_free()
721 d40d->lli_phy.dst = NULL; in d40_pool_lli_free()
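
d40_pool_lli_alloc() above over-allocates the pool by one alignment unit and then aligns the working pointer inside the buffer, keeping the raw pointer around so it can be freed later. A userspace sketch of the same idiom, with PTR_ALIGN re-implemented here for illustration:

    #include <stdint.h>
    #include <stdlib.h>

    #define PTR_ALIGN(p, a) \
            ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

    /* Returns an aligned pointer into *raw; free(*raw) releases the pool. */
    static void *pool_alloc_aligned(size_t size, size_t align, void **raw)
    {
            *raw = malloc(size + align);    /* slack so alignment always fits */
            return *raw ? PTR_ALIGN(*raw, align) : NULL;
    }
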
729 int ret = -EINVAL; in d40_lcla_alloc_one()
731 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); in d40_lcla_alloc_one()
738 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; in d40_lcla_alloc_one()
740 if (!d40c->base->lcla_pool.alloc_map[idx]) { in d40_lcla_alloc_one()
741 d40c->base->lcla_pool.alloc_map[idx] = d40d; in d40_lcla_alloc_one()
742 d40d->lcla_alloc++; in d40_lcla_alloc_one()
748 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); in d40_lcla_alloc_one()
758 int ret = -EINVAL; in d40_lcla_free_all()
763 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); in d40_lcla_free_all()
766 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; in d40_lcla_free_all()
768 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { in d40_lcla_free_all()
769 d40c->base->lcla_pool.alloc_map[idx] = NULL; in d40_lcla_free_all()
770 d40d->lcla_alloc--; in d40_lcla_free_all()
771 if (d40d->lcla_alloc == 0) { in d40_lcla_free_all()
778 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); in d40_lcla_free_all()
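
The alloc/free pair above treats the LCLA map as one flat array in which every physical channel owns a fixed block of link slots. A sketch of that indexing; the per-group constant is assumed for illustration:

    #define D40_LCLA_LINK_PER_EVENT_GRP 128  /* assumed slots per channel */

    static inline int lcla_slot(int phy_num, int link)
    {
            /* channel's block base plus the link index within it */
            return phy_num * D40_LCLA_LINK_PER_EVENT_GRP + link;
    }
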
786 list_del(&d40d->node); in d40_desc_remove()
793 if (!list_empty(&d40c->client)) { in d40_desc_get()
797 list_for_each_entry_safe(d, _d, &d40c->client, node) { in d40_desc_get()
798 if (async_tx_test_ack(&d->txd)) { in d40_desc_get()
808 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); in d40_desc_get()
811 INIT_LIST_HEAD(&desc->node); in d40_desc_get()
821 kmem_cache_free(d40c->base->desc_slab, d40d); in d40_desc_free()
826 list_add_tail(&desc->node, &d40c->active); in d40_desc_submit()
831 struct d40_phy_lli *lli_dst = desc->lli_phy.dst; in d40_phy_lli_load()
832 struct d40_phy_lli *lli_src = desc->lli_phy.src; in d40_phy_lli_load()
835 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); in d40_phy_lli_load()
836 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); in d40_phy_lli_load()
837 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); in d40_phy_lli_load()
838 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); in d40_phy_lli_load()
840 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); in d40_phy_lli_load()
841 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); in d40_phy_lli_load()
842 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); in d40_phy_lli_load()
843 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); in d40_phy_lli_load()
848 list_add_tail(&desc->node, &d40c->done); in d40_desc_done()
853 struct d40_lcla_pool *pool = &chan->base->lcla_pool; in d40_log_lli_to_lcxa()
854 struct d40_log_lli_bidir *lli = &desc->lli_log; in d40_log_lli_to_lcxa()
855 int lli_current = desc->lli_current; in d40_log_lli_to_lcxa()
856 int lli_len = desc->lli_len; in d40_log_lli_to_lcxa()
857 bool cyclic = desc->cyclic; in d40_log_lli_to_lcxa()
858 int curr_lcla = -EINVAL; in d40_log_lli_to_lcxa()
860 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; in d40_log_lli_to_lcxa()
873 if (linkback || (lli_len - lli_current > 1)) { in d40_log_lli_to_lcxa()
880 if (!(chan->phy_chan->use_soft_lli && in d40_log_lli_to_lcxa()
881 chan->dma_cfg.dir == DMA_DEV_TO_MEM)) in d40_log_lli_to_lcxa()
893 if (!linkback || curr_lcla == -EINVAL) { in d40_log_lli_to_lcxa()
896 if (curr_lcla == -EINVAL) in d40_log_lli_to_lcxa()
899 d40_log_lli_lcpa_write(chan->lcpa, in d40_log_lli_to_lcxa()
900 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
901 &lli->src[lli_current], in d40_log_lli_to_lcxa()
911 unsigned int lcla_offset = chan->phy_chan->num * 1024 + in d40_log_lli_to_lcxa()
913 struct d40_log_lli *lcla = pool->base + lcla_offset; in d40_log_lli_to_lcxa()
920 next_lcla = linkback ? first_lcla : -EINVAL; in d40_log_lli_to_lcxa()
922 if (cyclic || next_lcla == -EINVAL) in d40_log_lli_to_lcxa()
927 d40_log_lli_lcpa_write(chan->lcpa, in d40_log_lli_to_lcxa()
928 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
929 &lli->src[lli_current], in d40_log_lli_to_lcxa()
938 &lli->dst[lli_current], in d40_log_lli_to_lcxa()
939 &lli->src[lli_current], in d40_log_lli_to_lcxa()
947 dma_sync_single_range_for_device(chan->base->dev, in d40_log_lli_to_lcxa()
948 pool->dma_addr, lcla_offset, in d40_log_lli_to_lcxa()
954 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { in d40_log_lli_to_lcxa()
960 desc->lli_current = lli_current; in d40_log_lli_to_lcxa()
967 d40d->lli_current = d40d->lli_len; in d40_desc_load()
974 return list_first_entry_or_null(&d40c->active, struct d40_desc, node); in d40_first_active_get()
981 desc->is_in_client_list = false; in d40_desc_queue()
982 list_add_tail(&desc->node, &d40c->pending_queue); in d40_desc_queue()
987 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc, in d40_first_pending()
993 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node); in d40_first_queued()
998 return list_first_entry_or_null(&d40c->done, struct d40_desc, node); in d40_first_done()
1028 seg_max -= max_w; in d40_size_2_dmalen()
1031 return -EINVAL; in d40_size_2_dmalen()
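
d40_size_2_dmalen() above works out how many LLIs a job of a given size needs, returning -EINVAL for sizes the hardware cannot express. Its core is a rounded-up division by the maximum segment size; a sketch:

    /* LLI count for `size' bytes when one LLI moves at most seg_max bytes. */
    static int size_to_lli_count(int size, int seg_max)
    {
            return (size + seg_max - 1) / seg_max;  /* DIV_ROUND_UP(size, seg_max) */
    }
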
1077 spin_lock_irqsave(&d40c->base->execmd_lock, flags); in __d40_execute_command_phy()
1079 if (d40c->phy_chan->num % 2 == 0) in __d40_execute_command_phy()
1080 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in __d40_execute_command_phy()
1082 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in __d40_execute_command_phy()
1086 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_phy()
1087 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_phy()
1093 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); in __d40_execute_command_phy()
1094 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), in __d40_execute_command_phy()
1101 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_phy()
1102 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_phy()
1119 d40c->phy_chan->num, d40c->log_num, in __d40_execute_command_phy()
1122 ret = -EBUSY; in __d40_execute_command_phy()
1127 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); in __d40_execute_command_phy()
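
__d40_execute_command_phy() above reads 2-bit per-channel status fields, with even-numbered channels reported in the ACTIVE register and odd-numbered ones in ACTIVO. A compilable sketch of that extraction; the position macros are assumed to mirror the driver's:

    #define D40_CHAN_POS(chan)       (2 * ((chan) / 2))  /* assumed */
    #define D40_CHAN_POS_MASK(chan)  (0x3UL << D40_CHAN_POS(chan))

    static unsigned long chan_status(unsigned long active,
                                     unsigned long activo, int num)
    {
            /* even channels live in ACTIVE, odd ones in ACTIVO */
            unsigned long reg = (num % 2 == 0) ? active : activo;

            return (reg & D40_CHAN_POS_MASK(num)) >> D40_CHAN_POS(num);
    }
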
1161 if (!list_empty(&d40c->client)) in d40_term_all()
1162 list_for_each_entry_safe(d40d, _d, &d40c->client, node) { in d40_term_all()
1168 if (!list_empty(&d40c->prepare_queue)) in d40_term_all()
1170 &d40c->prepare_queue, node) { in d40_term_all()
1175 d40c->pending_tx = 0; in d40_term_all()
1224 "status %x\n", d40c->phy_chan->num, in __d40_config_set_event()
1225 d40c->log_num, status); in __d40_config_set_event()
1236 while (--tries) { in __d40_config_set_event()
1249 100 - tries); in __d40_config_set_event()
1264 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_config_set_event()
1266 /* Enable event line connected to device (or memcpy) */ in d40_config_set_event()
1267 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || in d40_config_set_event()
1268 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_config_set_event()
1272 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) in d40_config_set_event()
1296 if (d40c->phy_chan->num % 2 == 0) in __d40_execute_command_log()
1297 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in __d40_execute_command_log()
1299 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in __d40_execute_command_log()
1302 spin_lock_irqsave(&d40c->phy_chan->lock, flags); in __d40_execute_command_log()
1309 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in __d40_execute_command_log()
1310 D40_CHAN_POS(d40c->phy_chan->num); in __d40_execute_command_log()
1333 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); in __d40_execute_command_log()
1366 return phy_map[d40c->dma_cfg.mode_opt]; in d40_get_prmo()
1368 return log_map[d40c->dma_cfg.mode_opt]; in d40_get_prmo()
1377 addr_base = (d40c->phy_chan->num % 2) * 4; in d40_config_write()
1380 D40_CHAN_POS(d40c->phy_chan->num); in d40_config_write()
1381 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); in d40_config_write()
1384 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); in d40_config_write()
1386 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); in d40_config_write()
1389 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) in d40_config_write()
1394 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); in d40_config_write()
1395 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); in d40_config_write()
1412 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) in d40_residue()
1420 return num_elt * d40c->dma_cfg.dst_info.data_width; in d40_residue()
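
d40_residue() above reads the remaining element count (ECNT) out of LCSP2 and scales it by the configured element width to get a residue in bytes. A sketch, with the mask and shift values assumed rather than taken from the driver:

    #define D40_MEM_LCSP2_ECNT_MASK  0xFFFF0000u  /* assumed */
    #define D40_MEM_LCSP2_ECNT_POS   16           /* assumed */

    static unsigned int residue_bytes(unsigned int lcsp2, unsigned int width)
    {
            unsigned int num_elt = (lcsp2 & D40_MEM_LCSP2_ECNT_MASK)
                                   >> D40_MEM_LCSP2_ECNT_POS;

            return num_elt * width;  /* elements left times bytes per element */
    }
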
1428 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; in d40_tx_is_linked()
1442 if (d40c->phy_chan == NULL) { in d40_pause()
1444 return -EINVAL; in d40_pause()
1447 if (!d40c->busy) in d40_pause()
1450 spin_lock_irqsave(&d40c->lock, flags); in d40_pause()
1451 pm_runtime_get_sync(d40c->base->dev); in d40_pause()
1455 pm_runtime_mark_last_busy(d40c->base->dev); in d40_pause()
1456 pm_runtime_put_autosuspend(d40c->base->dev); in d40_pause()
1457 spin_unlock_irqrestore(&d40c->lock, flags); in d40_pause()
1467 if (d40c->phy_chan == NULL) { in d40_resume()
1469 return -EINVAL; in d40_resume()
1472 if (!d40c->busy) in d40_resume()
1475 spin_lock_irqsave(&d40c->lock, flags); in d40_resume()
1476 pm_runtime_get_sync(d40c->base->dev); in d40_resume()
1482 pm_runtime_mark_last_busy(d40c->base->dev); in d40_resume()
1483 pm_runtime_put_autosuspend(d40c->base->dev); in d40_resume()
1484 spin_unlock_irqrestore(&d40c->lock, flags); in d40_resume()
1490 struct d40_chan *d40c = container_of(tx->chan, in d40_tx_submit()
1497 spin_lock_irqsave(&d40c->lock, flags); in d40_tx_submit()
1500 spin_unlock_irqrestore(&d40c->lock, flags); in d40_tx_submit()
1519 if (!d40c->busy) { in d40_queue_start()
1520 d40c->busy = true; in d40_queue_start()
1521 pm_runtime_get_sync(d40c->base->dev); in d40_queue_start()
1554 if (d40d->cyclic) { in dma_tc_handle()
1561 if (d40d->lli_current < d40d->lli_len in dma_tc_handle()
1568 if (d40d->lli_current == d40d->lli_len) in dma_tc_handle()
1569 d40d->lli_current = 0; in dma_tc_handle()
1574 if (d40d->lli_current < d40d->lli_len) { in dma_tc_handle()
1582 d40c->busy = false; in dma_tc_handle()
1584 pm_runtime_mark_last_busy(d40c->base->dev); in dma_tc_handle()
1585 pm_runtime_put_autosuspend(d40c->base->dev); in dma_tc_handle()
1592 d40c->pending_tx++; in dma_tc_handle()
1593 tasklet_schedule(&d40c->tasklet); in dma_tc_handle()
1605 spin_lock_irqsave(&d40c->lock, flags); in dma_tasklet()
1612 if (d40d == NULL || !d40d->cyclic) in dma_tasklet()
1616 if (!d40d->cyclic) in dma_tasklet()
1617 dma_cookie_complete(&d40d->txd); in dma_tasklet()
1623 if (d40c->pending_tx == 0) { in dma_tasklet()
1624 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1629 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); in dma_tasklet()
1630 dmaengine_desc_get_callback(&d40d->txd, &cb); in dma_tasklet()
1632 if (!d40d->cyclic) { in dma_tasklet()
1633 if (async_tx_test_ack(&d40d->txd)) { in dma_tasklet()
1636 } else if (!d40d->is_in_client_list) { in dma_tasklet()
1639 list_add_tail(&d40d->node, &d40c->client); in dma_tasklet()
1640 d40d->is_in_client_list = true; in dma_tasklet()
1644 d40c->pending_tx--; in dma_tasklet()
1646 if (d40c->pending_tx) in dma_tasklet()
1647 tasklet_schedule(&d40c->tasklet); in dma_tasklet()
1649 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1657 if (d40c->pending_tx > 0) in dma_tasklet()
1658 d40c->pending_tx--; in dma_tasklet()
1659 spin_unlock_irqrestore(&d40c->lock, flags); in dma_tasklet()
1667 long chan = -1; in d40_handle_interrupt()
1670 u32 *regs = base->regs_interrupt; in d40_handle_interrupt()
1671 struct d40_interrupt_lookup *il = base->gen_dmac.il; in d40_handle_interrupt()
1672 u32 il_size = base->gen_dmac.il_size; in d40_handle_interrupt()
1674 spin_lock(&base->interrupt_lock); in d40_handle_interrupt()
1676 /* Read interrupt status of both logical and physical channels */ in d40_handle_interrupt()
1678 regs[i] = readl(base->virtbase + il[i].src); in d40_handle_interrupt()
1690 idx = chan & (BITS_PER_LONG - 1); in d40_handle_interrupt()
1693 d40c = base->lookup_phy_chans[idx]; in d40_handle_interrupt()
1695 d40c = base->lookup_log_chans[il[row].offset + idx]; in d40_handle_interrupt()
1706 writel(BIT(idx), base->virtbase + il[row].clr); in d40_handle_interrupt()
1708 spin_lock(&d40c->lock); in d40_handle_interrupt()
1713 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", in d40_handle_interrupt()
1716 spin_unlock(&d40c->lock); in d40_handle_interrupt()
1719 spin_unlock(&base->interrupt_lock); in d40_handle_interrupt()
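
The interrupt handler above scans the latched status words as one flat bitmap; the flat position of a set bit then splits into the status-word index (row) and the bit within that word (idx), as seen in the lines above. A sketch of that split:

    #include <limits.h>

    #define BITS_PER_LONG ((int)(sizeof(long) * CHAR_BIT))

    static void split_flat_bit(long chan, long *row, long *idx)
    {
            *row = chan / BITS_PER_LONG;        /* which status word */
            *idx = chan & (BITS_PER_LONG - 1);  /* bit inside that word */
    }
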
1728 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; in d40_validate_conf()
1730 if (!conf->dir) { in d40_validate_conf()
1732 res = -EINVAL; in d40_validate_conf()
1735 if ((is_log && conf->dev_type > d40c->base->num_log_chans) || in d40_validate_conf()
1736 (!is_log && conf->dev_type > d40c->base->num_phy_chans) || in d40_validate_conf()
1737 (conf->dev_type < 0)) { in d40_validate_conf()
1738 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); in d40_validate_conf()
1739 res = -EINVAL; in d40_validate_conf()
1742 if (conf->dir == DMA_DEV_TO_DEV) { in d40_validate_conf()
1748 res = -EINVAL; in d40_validate_conf()
1751 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * in d40_validate_conf()
1752 conf->src_info.data_width != in d40_validate_conf()
1753 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * in d40_validate_conf()
1754 conf->dst_info.data_width) { in d40_validate_conf()
1761 res = -EINVAL; in d40_validate_conf()
1772 spin_lock_irqsave(&phy->lock, flags); in d40_alloc_mask_set()
1774 *first_user = ((phy->allocated_src | phy->allocated_dst) in d40_alloc_mask_set()
1779 if (phy->allocated_src == D40_ALLOC_FREE && in d40_alloc_mask_set()
1780 phy->allocated_dst == D40_ALLOC_FREE) { in d40_alloc_mask_set()
1781 phy->allocated_dst = D40_ALLOC_PHY; in d40_alloc_mask_set()
1782 phy->allocated_src = D40_ALLOC_PHY; in d40_alloc_mask_set()
1790 if (phy->allocated_src == D40_ALLOC_PHY) in d40_alloc_mask_set()
1793 if (phy->allocated_src == D40_ALLOC_FREE) in d40_alloc_mask_set()
1794 phy->allocated_src = D40_ALLOC_LOG_FREE; in d40_alloc_mask_set()
1796 if (!(phy->allocated_src & BIT(log_event_line))) { in d40_alloc_mask_set()
1797 phy->allocated_src |= BIT(log_event_line); in d40_alloc_mask_set()
1802 if (phy->allocated_dst == D40_ALLOC_PHY) in d40_alloc_mask_set()
1805 if (phy->allocated_dst == D40_ALLOC_FREE) in d40_alloc_mask_set()
1806 phy->allocated_dst = D40_ALLOC_LOG_FREE; in d40_alloc_mask_set()
1808 if (!(phy->allocated_dst & BIT(log_event_line))) { in d40_alloc_mask_set()
1809 phy->allocated_dst |= BIT(log_event_line); in d40_alloc_mask_set()
1814 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_set()
1817 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_set()
1827 spin_lock_irqsave(&phy->lock, flags); in d40_alloc_mask_free()
1829 phy->allocated_dst = D40_ALLOC_FREE; in d40_alloc_mask_free()
1830 phy->allocated_src = D40_ALLOC_FREE; in d40_alloc_mask_free()
1837 phy->allocated_src &= ~BIT(log_event_line); in d40_alloc_mask_free()
1838 if (phy->allocated_src == D40_ALLOC_LOG_FREE) in d40_alloc_mask_free()
1839 phy->allocated_src = D40_ALLOC_FREE; in d40_alloc_mask_free()
1841 phy->allocated_dst &= ~BIT(log_event_line); in d40_alloc_mask_free()
1842 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) in d40_alloc_mask_free()
1843 phy->allocated_dst = D40_ALLOC_FREE; in d40_alloc_mask_free()
1846 is_free = ((phy->allocated_src | phy->allocated_dst) == in d40_alloc_mask_free()
1849 spin_unlock_irqrestore(&phy->lock, flags); in d40_alloc_mask_free()
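
The set/free pair above encodes a channel's ownership in one word per direction: dedicated marker values for "free" and "physically owned", and otherwise one bit per logical event line. A sketch of taking one event line; the two marker values are assumed for illustration:

    #define D40_ALLOC_FREE      (1U << 31)  /* assumed marker */
    #define D40_ALLOC_PHY       (1U << 30)  /* assumed marker */
    #define D40_ALLOC_LOG_FREE  0U          /* logical mode, no lines yet */

    static int take_log_event_line(unsigned int *alloc, int line)
    {
            if (*alloc == D40_ALLOC_PHY)
                    return -1;                      /* channel used physically */
            if (*alloc == D40_ALLOC_FREE)
                    *alloc = D40_ALLOC_LOG_FREE;    /* claim for logical use */
            if (*alloc & (1U << line))
                    return -1;                      /* event line already taken */
            *alloc |= 1U << line;
            return 0;
    }
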
1856 int dev_type = d40c->dma_cfg.dev_type; in d40_allocate_channel()
1865 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; in d40_allocate_channel()
1867 phys = d40c->base->phy_res; in d40_allocate_channel()
1868 num_phy_chans = d40c->base->num_phy_chans; in d40_allocate_channel()
1870 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { in d40_allocate_channel()
1873 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_allocate_channel()
1874 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_allocate_channel()
1875 /* dst event lines are used for logical memcpy */ in d40_allocate_channel()
1879 return -EINVAL; in d40_allocate_channel()
1885 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_allocate_channel()
1887 if (d40c->dma_cfg.use_fixed_channel) { in d40_allocate_channel()
1888 i = d40c->dma_cfg.phy_channel; in d40_allocate_channel()
1902 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { in d40_allocate_channel()
1913 return -EINVAL; in d40_allocate_channel()
1915 d40c->phy_chan = &phys[i]; in d40_allocate_channel()
1916 d40c->log_num = D40_PHY_CHAN; in d40_allocate_channel()
1919 if (dev_type == -1) in d40_allocate_channel()
1920 return -EINVAL; in d40_allocate_channel()
1923 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { in d40_allocate_channel()
1926 if (d40c->dma_cfg.use_fixed_channel) { in d40_allocate_channel()
1927 i = d40c->dma_cfg.phy_channel; in d40_allocate_channel()
1932 return -EINVAL; in d40_allocate_channel()
1941 return -EINVAL; in d40_allocate_channel()
1945 * Spread logical channels across all available physical rather in d40_allocate_channel()
1947 * channels. in d40_allocate_channel()
1957 for (i = phy_num + 1; i >= phy_num; i--) { in d40_allocate_channel()
1965 return -EINVAL; in d40_allocate_channel()
1968 d40c->phy_chan = &phys[i]; in d40_allocate_channel()
1969 d40c->log_num = log_num; in d40_allocate_channel()
1973 d40c->base->lookup_log_chans[d40c->log_num] = d40c; in d40_allocate_channel()
1975 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; in d40_allocate_channel()
1983 dma_cap_mask_t cap = d40c->chan.device->cap_mask; in d40_config_memcpy()
1986 d40c->dma_cfg = dma40_memcpy_conf_log; in d40_config_memcpy()
1987 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; in d40_config_memcpy()
1989 d40_log_cfg(&d40c->dma_cfg, in d40_config_memcpy()
1990 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); in d40_config_memcpy()
1994 d40c->dma_cfg = dma40_memcpy_conf_phy; in d40_config_memcpy()
1997 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); in d40_config_memcpy()
2000 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); in d40_config_memcpy()
2001 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); in d40_config_memcpy()
2004 chan_err(d40c, "No memcpy\n"); in d40_config_memcpy()
2005 return -EINVAL; in d40_config_memcpy()
2015 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_free_dma()
2016 struct d40_phy_res *phy = d40c->phy_chan; in d40_free_dma()
2024 return -EINVAL; in d40_free_dma()
2027 if (phy->allocated_src == D40_ALLOC_FREE && in d40_free_dma()
2028 phy->allocated_dst == D40_ALLOC_FREE) { in d40_free_dma()
2030 return -EINVAL; in d40_free_dma()
2033 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_free_dma()
2034 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) in d40_free_dma()
2036 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) in d40_free_dma()
2040 return -EINVAL; in d40_free_dma()
2043 pm_runtime_get_sync(d40c->base->dev); in d40_free_dma()
2053 d40c->base->lookup_log_chans[d40c->log_num] = NULL; in d40_free_dma()
2055 d40c->base->lookup_phy_chans[phy->num] = NULL; in d40_free_dma()
2057 if (d40c->busy) { in d40_free_dma()
2058 pm_runtime_mark_last_busy(d40c->base->dev); in d40_free_dma()
2059 pm_runtime_put_autosuspend(d40c->base->dev); in d40_free_dma()
2062 d40c->busy = false; in d40_free_dma()
2063 d40c->phy_chan = NULL; in d40_free_dma()
2064 d40c->configured = false; in d40_free_dma()
2066 pm_runtime_mark_last_busy(d40c->base->dev); in d40_free_dma()
2067 pm_runtime_put_autosuspend(d40c->base->dev); in d40_free_dma()
2078 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); in d40_is_paused()
2080 spin_lock_irqsave(&d40c->lock, flags); in d40_is_paused()
2083 if (d40c->phy_chan->num % 2 == 0) in d40_is_paused()
2084 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; in d40_is_paused()
2086 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; in d40_is_paused()
2089 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> in d40_is_paused()
2090 D40_CHAN_POS(d40c->phy_chan->num); in d40_is_paused()
2096 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || in d40_is_paused()
2097 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { in d40_is_paused()
2099 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { in d40_is_paused()
2112 spin_unlock_irqrestore(&d40c->lock, flags); in d40_is_paused()
2124 spin_lock_irqsave(&d40c->lock, flags); in stedma40_residue()
2126 spin_unlock_irqrestore(&d40c->lock, flags); in stedma40_residue()
2137 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; in d40_prep_sg_log()
2138 struct stedma40_half_channel_info *src_info = &cfg->src_info; in d40_prep_sg_log()
2139 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; in d40_prep_sg_log()
2144 desc->lli_log.src, in d40_prep_sg_log()
2145 chan->log_def.lcsp1, in d40_prep_sg_log()
2146 src_info->data_width, in d40_prep_sg_log()
2147 dst_info->data_width); in d40_prep_sg_log()
2151 desc->lli_log.dst, in d40_prep_sg_log()
2152 chan->log_def.lcsp3, in d40_prep_sg_log()
2153 dst_info->data_width, in d40_prep_sg_log()
2154 src_info->data_width); in d40_prep_sg_log()
2165 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; in d40_prep_sg_phy()
2166 struct stedma40_half_channel_info *src_info = &cfg->src_info; in d40_prep_sg_phy()
2167 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; in d40_prep_sg_phy()
2171 if (desc->cyclic) in d40_prep_sg_phy()
2175 desc->lli_phy.src, in d40_prep_sg_phy()
2176 virt_to_phys(desc->lli_phy.src), in d40_prep_sg_phy()
2177 chan->src_def_cfg, in d40_prep_sg_phy()
2181 desc->lli_phy.dst, in d40_prep_sg_phy()
2182 virt_to_phys(desc->lli_phy.dst), in d40_prep_sg_phy()
2183 chan->dst_def_cfg, in d40_prep_sg_phy()
2186 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, in d40_prep_sg_phy()
2187 desc->lli_pool.size, DMA_TO_DEVICE); in d40_prep_sg_phy()
2204 cfg = &chan->dma_cfg; in d40_prep_desc()
2205 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, in d40_prep_desc()
2206 cfg->dst_info.data_width); in d40_prep_desc()
2207 if (desc->lli_len < 0) { in d40_prep_desc()
2212 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); in d40_prep_desc()
2218 desc->lli_current = 0; in d40_prep_desc()
2219 desc->txd.flags = dma_flags; in d40_prep_desc()
2220 desc->txd.tx_submit = d40_tx_submit; in d40_prep_desc()
2222 dma_async_tx_descriptor_init(&desc->txd, &chan->chan); in d40_prep_desc()
2242 if (!chan->phy_chan) { in d40_prep_sg()
2247 d40_set_runtime_config_write(dchan, &chan->slave_config, direction); in d40_prep_sg()
2249 spin_lock_irqsave(&chan->lock, flags); in d40_prep_sg()
2255 if (sg_next(&sg_src[sg_len - 1]) == sg_src) in d40_prep_sg()
2256 desc->cyclic = true; in d40_prep_sg()
2261 src_dev_addr = chan->runtime_addr; in d40_prep_sg()
2263 dst_dev_addr = chan->runtime_addr; in d40_prep_sg()
2282 list_add_tail(&desc->node, &chan->prepare_queue); in d40_prep_sg()
2284 spin_unlock_irqrestore(&chan->lock, flags); in d40_prep_sg()
2286 return &desc->txd; in d40_prep_sg()
2290 spin_unlock_irqrestore(&chan->lock, flags); in d40_prep_sg()
2304 d40c->dma_cfg = *info; in stedma40_filter()
2309 d40c->configured = true; in stedma40_filter()
2316 bool realtime = d40c->dma_cfg.realtime; in __d40_set_prio_rt()
2317 bool highprio = d40c->dma_cfg.high_priority; in __d40_set_prio_rt()
2323 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; in __d40_set_prio_rt()
2325 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; in __d40_set_prio_rt()
2332 * destination event lines that trigger logical channels. in __d40_set_prio_rt()
2337 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; in __d40_set_prio_rt()
2343 writel(bit, d40c->base->virtbase + prioreg + group * 4); in __d40_set_prio_rt()
2344 writel(bit, d40c->base->virtbase + rtreg + group * 4); in __d40_set_prio_rt()
2349 if (d40c->base->rev < 3) in d40_set_prio_realtime()
2352 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || in d40_set_prio_realtime()
2353 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_set_prio_realtime()
2354 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); in d40_set_prio_realtime()
2356 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || in d40_set_prio_realtime()
2357 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) in d40_set_prio_realtime()
2358 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); in d40_set_prio_realtime()
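
d40_set_prio_realtime() above programs one bit per event line, sixteen lines to a group register, so both the register offset and the bit derive from dev_type. A sketch of the mapping, with the macros assumed to mirror the driver's:

    #define D40_TYPE_TO_GROUP(type)  ((type) / 16)  /* assumed */
    #define D40_TYPE_TO_EVENT(type)  ((type) % 16)  /* assumed */

    /* Register byte offset and bit mask for a given device type. */
    static void event_reg_and_bit(int dev_type, int *reg_off, unsigned int *bit)
    {
            *reg_off = D40_TYPE_TO_GROUP(dev_type) * 4;
            *bit = 1U << D40_TYPE_TO_EVENT(dev_type);
    }
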
2379 cfg.dev_type = dma_spec->args[0]; in d40_xlate()
2380 flags = dma_spec->args[2]; in d40_xlate()
2399 cfg.phy_channel = dma_spec->args[1]; in d40_xlate()
2417 spin_lock_irqsave(&d40c->lock, flags); in d40_alloc_chan_resources()
2421 /* If no DMA configuration is set, use the default configuration (memcpy) */ in d40_alloc_chan_resources()
2422 if (!d40c->configured) { in d40_alloc_chan_resources()
2425 chan_err(d40c, "Failed to configure memcpy channel\n"); in d40_alloc_chan_resources()
2433 d40c->configured = false; in d40_alloc_chan_resources()
2437 pm_runtime_get_sync(d40c->base->dev); in d40_alloc_chan_resources()
2442 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) in d40_alloc_chan_resources()
2443 d40c->lcpa = d40c->base->lcpa_base + in d40_alloc_chan_resources()
2444 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; in d40_alloc_chan_resources()
2446 d40c->lcpa = d40c->base->lcpa_base + in d40_alloc_chan_resources()
2447 d40c->dma_cfg.dev_type * in d40_alloc_chan_resources()
2451 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); in d40_alloc_chan_resources()
2452 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); in d40_alloc_chan_resources()
2457 d40c->phy_chan->num, in d40_alloc_chan_resources()
2458 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); in d40_alloc_chan_resources()
2463 * resource is free. In case of multiple logical channels in d40_alloc_chan_resources()
2469 pm_runtime_mark_last_busy(d40c->base->dev); in d40_alloc_chan_resources()
2470 pm_runtime_put_autosuspend(d40c->base->dev); in d40_alloc_chan_resources()
2471 spin_unlock_irqrestore(&d40c->lock, flags); in d40_alloc_chan_resources()
2482 if (d40c->phy_chan == NULL) { in d40_free_chan_resources()
2487 spin_lock_irqsave(&d40c->lock, flags); in d40_free_chan_resources()
2493 spin_unlock_irqrestore(&d40c->lock, flags); in d40_free_chan_resources()
2566 if (d40c->phy_chan == NULL) { in d40_tx_status()
2568 return -EINVAL; in d40_tx_status()
2586 if (d40c->phy_chan == NULL) { in d40_issue_pending()
2591 spin_lock_irqsave(&d40c->lock, flags); in d40_issue_pending()
2593 list_splice_tail_init(&d40c->pending_queue, &d40c->queue); in d40_issue_pending()
2596 if (!d40c->busy) in d40_issue_pending()
2599 spin_unlock_irqrestore(&d40c->lock, flags); in d40_issue_pending()
2608 if (d40c->phy_chan == NULL) { in d40_terminate_all()
2610 return -EINVAL; in d40_terminate_all()
2613 spin_lock_irqsave(&d40c->lock, flags); in d40_terminate_all()
2615 pm_runtime_get_sync(d40c->base->dev); in d40_terminate_all()
2621 pm_runtime_mark_last_busy(d40c->base->dev); in d40_terminate_all()
2622 pm_runtime_put_autosuspend(d40c->base->dev); in d40_terminate_all()
2623 if (d40c->busy) { in d40_terminate_all()
2624 pm_runtime_mark_last_busy(d40c->base->dev); in d40_terminate_all()
2625 pm_runtime_put_autosuspend(d40c->base->dev); in d40_terminate_all()
2627 d40c->busy = false; in d40_terminate_all()
2629 spin_unlock_irqrestore(&d40c->lock, flags); in d40_terminate_all()
2660 info->psize = psize; in dma40_config_to_halfchannel()
2661 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; in dma40_config_to_halfchannel()
2671 memcpy(&d40c->slave_config, config, sizeof(*config)); in d40_set_runtime_config()
2682 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; in d40_set_runtime_config_write()
2688 if (d40c->phy_chan == NULL) { in d40_set_runtime_config_write()
2690 return -EINVAL; in d40_set_runtime_config_write()
2693 src_addr_width = config->src_addr_width; in d40_set_runtime_config_write()
2694 src_maxburst = config->src_maxburst; in d40_set_runtime_config_write()
2695 dst_addr_width = config->dst_addr_width; in d40_set_runtime_config_write()
2696 dst_maxburst = config->dst_maxburst; in d40_set_runtime_config_write()
2699 config_addr = config->src_addr; in d40_set_runtime_config_write()
2701 if (cfg->dir != DMA_DEV_TO_MEM) in d40_set_runtime_config_write()
2702 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
2705 cfg->dir); in d40_set_runtime_config_write()
2706 cfg->dir = DMA_DEV_TO_MEM; in d40_set_runtime_config_write()
2715 config_addr = config->dst_addr; in d40_set_runtime_config_write()
2717 if (cfg->dir != DMA_MEM_TO_DEV) in d40_set_runtime_config_write()
2718 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
2721 cfg->dir); in d40_set_runtime_config_write()
2722 cfg->dir = DMA_MEM_TO_DEV; in d40_set_runtime_config_write()
2730 dev_err(d40c->base->dev, in d40_set_runtime_config_write()
2733 return -EINVAL; in d40_set_runtime_config_write()
2737 dev_err(d40c->base->dev, "no address supplied\n"); in d40_set_runtime_config_write()
2738 return -EINVAL; in d40_set_runtime_config_write()
2742 dev_err(d40c->base->dev, in d40_set_runtime_config_write()
2748 return -EINVAL; in d40_set_runtime_config_write()
2766 return -EINVAL; in d40_set_runtime_config_write()
2768 cfg->src_info.data_width = src_addr_width; in d40_set_runtime_config_write()
2769 cfg->dst_info.data_width = dst_addr_width; in d40_set_runtime_config_write()
2771 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, in d40_set_runtime_config_write()
2776 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, in d40_set_runtime_config_write()
2783 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); in d40_set_runtime_config_write()
2785 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); in d40_set_runtime_config_write()
2788 d40c->runtime_addr = config_addr; in d40_set_runtime_config_write()
2789 d40c->runtime_direction = direction; in d40_set_runtime_config_write()
2790 dev_dbg(d40c->base->dev, in d40_set_runtime_config_write()
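
A hedged usage sketch of the path above from a client driver's point of view: the client fills a struct dma_slave_config and dmaengine_slave_config() routes it to d40_set_runtime_config(), which caches it until the first prep call writes it out. The FIFO address and widths here are illustrative:

    #include <linux/dmaengine.h>

    static int configure_tx(struct dma_chan *chan, dma_addr_t fifo)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo,  /* illustrative device FIFO */
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 8,     /* words per burst */
            };

            return dmaengine_slave_config(chan, &cfg);
    }
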
2810 INIT_LIST_HEAD(&dma->channels); in d40_chan_init()
2814 d40c->base = base; in d40_chan_init()
2815 d40c->chan.device = dma; in d40_chan_init()
2817 spin_lock_init(&d40c->lock); in d40_chan_init()
2819 d40c->log_num = D40_PHY_CHAN; in d40_chan_init()
2821 INIT_LIST_HEAD(&d40c->done); in d40_chan_init()
2822 INIT_LIST_HEAD(&d40c->active); in d40_chan_init()
2823 INIT_LIST_HEAD(&d40c->queue); in d40_chan_init()
2824 INIT_LIST_HEAD(&d40c->pending_queue); in d40_chan_init()
2825 INIT_LIST_HEAD(&d40c->client); in d40_chan_init()
2826 INIT_LIST_HEAD(&d40c->prepare_queue); in d40_chan_init()
2828 tasklet_setup(&d40c->tasklet, dma_tasklet); in d40_chan_init()
2830 list_add_tail(&d40c->chan.device_node, in d40_chan_init()
2831 &dma->channels); in d40_chan_init()
2837 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) { in d40_ops_init()
2838 dev->device_prep_slave_sg = d40_prep_slave_sg; in d40_ops_init()
2839 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in d40_ops_init()
2842 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { in d40_ops_init()
2843 dev->device_prep_dma_memcpy = d40_prep_memcpy; in d40_ops_init()
2844 dev->directions = BIT(DMA_MEM_TO_MEM); in d40_ops_init()
2849 dev->copy_align = DMAENGINE_ALIGN_4_BYTES; in d40_ops_init()
2852 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) in d40_ops_init()
2853 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; in d40_ops_init()
2855 dev->device_alloc_chan_resources = d40_alloc_chan_resources; in d40_ops_init()
2856 dev->device_free_chan_resources = d40_free_chan_resources; in d40_ops_init()
2857 dev->device_issue_pending = d40_issue_pending; in d40_ops_init()
2858 dev->device_tx_status = d40_tx_status; in d40_ops_init()
2859 dev->device_config = d40_set_runtime_config; in d40_ops_init()
2860 dev->device_pause = d40_pause; in d40_ops_init()
2861 dev->device_resume = d40_resume; in d40_ops_init()
2862 dev->device_terminate_all = d40_terminate_all; in d40_ops_init()
2863 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in d40_ops_init()
2864 dev->dev = base->dev; in d40_ops_init()
2872 d40_chan_init(base, &base->dma_slave, base->log_chans, in d40_dmaengine_init()
2873 0, base->num_log_chans); in d40_dmaengine_init()
2875 dma_cap_zero(base->dma_slave.cap_mask); in d40_dmaengine_init()
2876 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); in d40_dmaengine_init()
2877 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); in d40_dmaengine_init()
2879 d40_ops_init(base, &base->dma_slave); in d40_dmaengine_init()
2881 err = dmaenginem_async_device_register(&base->dma_slave); in d40_dmaengine_init()
2884 d40_err(base->dev, "Failed to register slave channels\n"); in d40_dmaengine_init()
2888 d40_chan_init(base, &base->dma_memcpy, base->log_chans, in d40_dmaengine_init()
2889 base->num_log_chans, base->num_memcpy_chans); in d40_dmaengine_init()
2891 dma_cap_zero(base->dma_memcpy.cap_mask); in d40_dmaengine_init()
2892 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); in d40_dmaengine_init()
2894 d40_ops_init(base, &base->dma_memcpy); in d40_dmaengine_init()
2896 err = dmaenginem_async_device_register(&base->dma_memcpy); in d40_dmaengine_init()
2899 d40_err(base->dev, in d40_dmaengine_init()
2900 "Failed to register memcpy only channels\n"); in d40_dmaengine_init()
2904 d40_chan_init(base, &base->dma_both, base->phy_chans, in d40_dmaengine_init()
2907 dma_cap_zero(base->dma_both.cap_mask); in d40_dmaengine_init()
2908 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); in d40_dmaengine_init()
2909 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); in d40_dmaengine_init()
2910 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); in d40_dmaengine_init()
2912 d40_ops_init(base, &base->dma_both); in d40_dmaengine_init()
2913 err = dmaenginem_async_device_register(&base->dma_both); in d40_dmaengine_init()
2916 d40_err(base->dev, in d40_dmaengine_init()
2917 "Failed to register logical and physical capable channels\n"); in d40_dmaengine_init()
2936 if (base->lcpa_regulator) in dma40_suspend()
2937 ret = regulator_disable(base->lcpa_regulator); in dma40_suspend()
2946 if (base->lcpa_regulator) { in dma40_resume()
2947 ret = regulator_enable(base->lcpa_regulator); in dma40_resume()
2977 for (i = 0; i < base->num_phy_chans; i++) { in d40_save_restore_registers()
2981 if (base->phy_res[i].reserved) in d40_save_restore_registers()
2984 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; in d40_save_restore_registers()
2987 dma40_backup(addr, &base->reg_val_backup_chan[idx], in d40_save_restore_registers()
2994 dma40_backup(base->virtbase, base->reg_val_backup, in d40_save_restore_registers()
2999 if (base->gen_dmac.backup) in d40_save_restore_registers()
3000 dma40_backup(base->virtbase, base->reg_val_backup_v4, in d40_save_restore_registers()
3001 base->gen_dmac.backup, in d40_save_restore_registers()
3002 base->gen_dmac.backup_size, in d40_save_restore_registers()
3013 if (base->rev != 1) in dma40_runtime_suspend()
3014 writel_relaxed(base->gcc_pwr_off_mask, in dma40_runtime_suspend()
3015 base->virtbase + D40_DREG_GCC); in dma40_runtime_suspend()
3027 base->virtbase + D40_DREG_GCC); in dma40_runtime_resume()
3046 int odd_even_bit = -2; in d40_phy_res_init()
3049 val[0] = readl(base->virtbase + D40_DREG_PRSME); in d40_phy_res_init()
3050 val[1] = readl(base->virtbase + D40_DREG_PRSMO); in d40_phy_res_init()
3052 for (i = 0; i < base->num_phy_chans; i++) { in d40_phy_res_init()
3053 base->phy_res[i].num = i; in d40_phy_res_init()
3056 /* Mark security-only channels as occupied */ in d40_phy_res_init()
3057 base->phy_res[i].allocated_src = D40_ALLOC_PHY; in d40_phy_res_init()
3058 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; in d40_phy_res_init()
3059 base->phy_res[i].reserved = true; in d40_phy_res_init()
3067 base->phy_res[i].allocated_src = D40_ALLOC_FREE; in d40_phy_res_init()
3068 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; in d40_phy_res_init()
3069 base->phy_res[i].reserved = false; in d40_phy_res_init()
3072 spin_lock_init(&base->phy_res[i].lock); in d40_phy_res_init()
3075 /* Mark disabled channels as occupied */ in d40_phy_res_init()
3076 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { in d40_phy_res_init()
3077 int chan = base->plat_data->disabled_channels[i]; in d40_phy_res_init()
3079 base->phy_res[chan].allocated_src = D40_ALLOC_PHY; in d40_phy_res_init()
3080 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; in d40_phy_res_init()
3081 base->phy_res[chan].reserved = true; in d40_phy_res_init()
3086 num_phy_chans_avail--; in d40_phy_res_init()
3089 /* Mark soft_lli channels */ in d40_phy_res_init()
3090 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { in d40_phy_res_init()
3091 int chan = base->plat_data->soft_lli_chans[i]; in d40_phy_res_init()
3093 base->phy_res[chan].use_soft_lli = true; in d40_phy_res_init()
3096 dev_info(base->dev, "%d of %d physical DMA channels available\n", in d40_phy_res_init()
3097 num_phy_chans_avail, base->num_phy_chans); in d40_phy_res_init()
3100 val[0] = readl(base->virtbase + D40_DREG_PRTYP); in d40_phy_res_init()
3102 for (i = 0; i < base->num_phy_chans; i++) { in d40_phy_res_init()
3104 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && in d40_phy_res_init()
3106 dev_info(base->dev, in d40_phy_res_init()
3116 * The clocks for the event lines on which reserved channels exists in d40_phy_res_init()
3119 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); in d40_phy_res_init()
3120 base->gcc_pwr_off_mask = gcc; in d40_phy_res_init()
3136 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); in d40_hw_detect_init()
3137 struct device *dev = &pdev->dev; in d40_hw_detect_init()
3161 pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i) in d40_hw_detect_init()
3164 cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i) in d40_hw_detect_init()
3169 return -EINVAL; in d40_hw_detect_init()
3175 return -EINVAL; in d40_hw_detect_init()
3189 return -EINVAL; in d40_hw_detect_init()
3192 /* The number of physical channels on this HW */ in d40_hw_detect_init()
3193 if (plat_data->num_of_phy_chans) in d40_hw_detect_init()
3194 num_phy_chans = plat_data->num_of_phy_chans; in d40_hw_detect_init()
3198 /* The number of channels used for memcpy */ in d40_hw_detect_init()
3199 if (plat_data->num_of_memcpy_chans) in d40_hw_detect_init()
3200 num_memcpy_chans = plat_data->num_of_memcpy_chans; in d40_hw_detect_init()
3207 "hardware rev: %d with %d physical and %d logical channels\n", in d40_hw_detect_init()
3216 return -ENOMEM; in d40_hw_detect_init()
3218 base->rev = rev; in d40_hw_detect_init()
3219 base->clk = clk; in d40_hw_detect_init()
3220 base->num_memcpy_chans = num_memcpy_chans; in d40_hw_detect_init()
3221 base->num_phy_chans = num_phy_chans; in d40_hw_detect_init()
3222 base->num_log_chans = num_log_chans; in d40_hw_detect_init()
3223 base->virtbase = virtbase; in d40_hw_detect_init()
3224 base->plat_data = plat_data; in d40_hw_detect_init()
3225 base->dev = dev; in d40_hw_detect_init()
3226 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); in d40_hw_detect_init()
3227 base->log_chans = &base->phy_chans[num_phy_chans]; in d40_hw_detect_init()
3229 if (base->plat_data->num_of_phy_chans == 14) { in d40_hw_detect_init()
3230 base->gen_dmac.backup = d40_backup_regs_v4b; in d40_hw_detect_init()
3231 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; in d40_hw_detect_init()
3232 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; in d40_hw_detect_init()
3233 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; in d40_hw_detect_init()
3234 base->gen_dmac.realtime_en = D40_DREG_CRSEG1; in d40_hw_detect_init()
3235 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; in d40_hw_detect_init()
3236 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; in d40_hw_detect_init()
3237 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; in d40_hw_detect_init()
3238 base->gen_dmac.il = il_v4b; in d40_hw_detect_init()
3239 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); in d40_hw_detect_init()
3240 base->gen_dmac.init_reg = dma_init_reg_v4b; in d40_hw_detect_init()
3241 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); in d40_hw_detect_init()
3243 if (base->rev >= 3) { in d40_hw_detect_init()
3244 base->gen_dmac.backup = d40_backup_regs_v4a; in d40_hw_detect_init()
3245 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; in d40_hw_detect_init()
3247 base->gen_dmac.interrupt_en = D40_DREG_PCMIS; in d40_hw_detect_init()
3248 base->gen_dmac.interrupt_clear = D40_DREG_PCICR; in d40_hw_detect_init()
3249 base->gen_dmac.realtime_en = D40_DREG_RSEG1; in d40_hw_detect_init()
3250 base->gen_dmac.realtime_clear = D40_DREG_RCEG1; in d40_hw_detect_init()
3251 base->gen_dmac.high_prio_en = D40_DREG_PSEG1; in d40_hw_detect_init()
3252 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; in d40_hw_detect_init()
3253 base->gen_dmac.il = il_v4a; in d40_hw_detect_init()
3254 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); in d40_hw_detect_init()
3255 base->gen_dmac.init_reg = dma_init_reg_v4a; in d40_hw_detect_init()
3256 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); in d40_hw_detect_init()
3259 base->phy_res = devm_kcalloc(dev, num_phy_chans, in d40_hw_detect_init()
3260 sizeof(*base->phy_res), in d40_hw_detect_init()
3262 if (!base->phy_res) in d40_hw_detect_init()
3263 return -ENOMEM; in d40_hw_detect_init()
3265 base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans, in d40_hw_detect_init()
3266 sizeof(*base->lookup_phy_chans), in d40_hw_detect_init()
3268 if (!base->lookup_phy_chans) in d40_hw_detect_init()
3269 return -ENOMEM; in d40_hw_detect_init()
3271 base->lookup_log_chans = devm_kcalloc(dev, num_log_chans, in d40_hw_detect_init()
3272 sizeof(*base->lookup_log_chans), in d40_hw_detect_init()
3274 if (!base->lookup_log_chans) in d40_hw_detect_init()
3275 return -ENOMEM; in d40_hw_detect_init()
3277 base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans, in d40_hw_detect_init()
3280 if (!base->reg_val_backup_chan) in d40_hw_detect_init()
3281 return -ENOMEM; in d40_hw_detect_init()
3283 base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans in d40_hw_detect_init()
3285 sizeof(*base->lcla_pool.alloc_map), in d40_hw_detect_init()
3287 if (!base->lcla_pool.alloc_map) in d40_hw_detect_init()
3288 return -ENOMEM; in d40_hw_detect_init()
3290 base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size, in d40_hw_detect_init()
3291 sizeof(*base->regs_interrupt), in d40_hw_detect_init()
3293 if (!base->regs_interrupt) in d40_hw_detect_init()
3294 return -ENOMEM; in d40_hw_detect_init()
3296 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), in d40_hw_detect_init()
3299 if (!base->desc_slab) in d40_hw_detect_init()
3300 return -ENOMEM; in d40_hw_detect_init()
3303 base->desc_slab); in d40_hw_detect_init()
3320 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; in d40_hw_init()
3321 u32 reg_size = base->gen_dmac.init_reg_size; in d40_hw_init()
3325 base->virtbase + dma_init_reg[i].reg); in d40_hw_init()
3327 /* Configure all our dma channels to default settings */ in d40_hw_init()
3328 for (i = 0; i < base->num_phy_chans; i++) { in d40_hw_init()
3332 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src in d40_hw_init()
3350 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); in d40_hw_init()
3351 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); in d40_hw_init()
3352 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); in d40_hw_init()
3353 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); in d40_hw_init()
3356 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); in d40_hw_init()
3359 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); in d40_hw_init()
3362 base->gen_dmac.init_reg = NULL; in d40_hw_init()
3363 base->gen_dmac.init_reg_size = 0; in d40_hw_init()
3368 struct d40_lcla_pool *pool = &base->lcla_pool; in d40_lcla_allocate()
3382 return -ENOMEM; in d40_lcla_allocate()
3385 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; in d40_lcla_allocate()
3389 base->lcla_pool.pages); in d40_lcla_allocate()
3392 d40_err(base->dev, "Failed to allocate %d pages.\n", in d40_lcla_allocate()
3393 base->lcla_pool.pages); in d40_lcla_allocate()
3394 ret = -ENOMEM; in d40_lcla_allocate()
3397 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3402 (LCLA_ALIGNMENT - 1)) == 0) in d40_lcla_allocate()
3407 free_pages(page_list[j], base->lcla_pool.pages); in d40_lcla_allocate()
3410 base->lcla_pool.base = (void *)page_list[i]; in d40_lcla_allocate()
3416 dev_warn(base->dev, in d40_lcla_allocate()
3418 __func__, base->lcla_pool.pages); in d40_lcla_allocate()
3419 base->lcla_pool.base_unaligned = kmalloc(SZ_1K * in d40_lcla_allocate()
3420 base->num_phy_chans + in d40_lcla_allocate()
3423 if (!base->lcla_pool.base_unaligned) { in d40_lcla_allocate()
3424 ret = -ENOMEM; in d40_lcla_allocate()
3428 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, in d40_lcla_allocate()
3432 pool->dma_addr = dma_map_single(base->dev, pool->base, in d40_lcla_allocate()
3433 SZ_1K * base->num_phy_chans, in d40_lcla_allocate()
3435 if (dma_mapping_error(base->dev, pool->dma_addr)) { in d40_lcla_allocate()
3436 pool->dma_addr = 0; in d40_lcla_allocate()
3437 ret = -ENOMEM; in d40_lcla_allocate()
3441 writel(virt_to_phys(base->lcla_pool.base), in d40_lcla_allocate()
3442 base->virtbase + D40_DREG_LCLA); in d40_lcla_allocate()
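
d40_lcla_allocate() above first tries to obtain an LCLA buffer whose address is naturally LCLA_ALIGNMENT-aligned by allocating repeatedly and keeping the first lucky hit, then falls back to over-allocating and aligning by hand. A userspace sketch of the retry strategy (attempt count illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ATTEMPTS 16  /* illustrative cap on retries */

    static void *alloc_aligned_by_retry(size_t size, uintptr_t align)
    {
            void *rejected[MAX_ATTEMPTS];
            void *hit = NULL;
            int i, n = 0;

            for (i = 0; i < MAX_ATTEMPTS; i++) {
                    void *p = malloc(size);

                    if (!p)
                            break;
                    if (((uintptr_t)p & (align - 1)) == 0) {
                            hit = p;           /* naturally aligned: keep it */
                            break;
                    }
                    rejected[n++] = p;         /* misaligned: free below */
            }
            while (n--)
                    free(rejected[n]);
            return hit;
    }
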
3458 return -ENOMEM; in d40_of_probe()
3461 of_property_read_u32(np, "dma-channels", &num_phy); in d40_of_probe()
3463 pdata->num_of_phy_chans = num_phy; in d40_of_probe()
3465 list = of_get_property(np, "memcpy-channels", &num_memcpy); in d40_of_probe()
3470 "Invalid number of memcpy channels specified (%d)\n", in d40_of_probe()
3472 return -EINVAL; in d40_of_probe()
3474 pdata->num_of_memcpy_chans = num_memcpy; in d40_of_probe()
3476 of_property_read_u32_array(np, "memcpy-channels", in d40_of_probe()
3480 list = of_get_property(np, "disabled-channels", &num_disabled); in d40_of_probe()
3485 "Invalid number of disabled channels specified (%d)\n", in d40_of_probe()
3487 return -EINVAL; in d40_of_probe()
3490 of_property_read_u32_array(np, "disabled-channels", in d40_of_probe()
3491 pdata->disabled_channels, in d40_of_probe()
3493 pdata->disabled_channels[num_disabled] = -1; in d40_of_probe()
3495 dev->platform_data = pdata; in d40_of_probe()
3502 struct device *dev = &pdev->dev; in d40_probe()
3503 struct device_node *np = pdev->dev.of_node; in d40_probe()
3513 ret = -ENOMEM; in d40_probe()
3525 spin_lock_init(&base->interrupt_lock); in d40_probe()
3526 spin_lock_init(&base->execmd_lock); in d40_probe()
3532 ret = -EINVAL; in d40_probe()
3541 base->lcpa_size = resource_size(&res_lcpa); in d40_probe()
3542 base->phy_lcpa = res_lcpa.start; in d40_probe()
3544 &base->phy_lcpa, &base->lcpa_size); in d40_probe()
3547 val = readl(base->virtbase + D40_DREG_LCPA); in d40_probe()
3548 if (base->phy_lcpa != val && val != 0) { in d40_probe()
3551 __func__, val, (u32)base->phy_lcpa); in d40_probe()
3553 writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA); in d40_probe()
3555 base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size); in d40_probe()
3556 if (!base->lcpa_base) { in d40_probe()
3557 ret = -ENOMEM; in d40_probe()
3562 if (base->plat_data->use_esram_lcla) { in d40_probe()
3566 ret = -ENOENT; in d40_probe()
3571 base->lcla_pool.base = devm_ioremap(dev, res->start, in d40_probe()
3573 if (!base->lcla_pool.base) { in d40_probe()
3574 ret = -ENOMEM; in d40_probe()
3578 writel(res->start, base->virtbase + D40_DREG_LCLA); in d40_probe()
3588 spin_lock_init(&base->lcla_pool.lock); in d40_probe()
3590 base->irq = platform_get_irq(pdev, 0); in d40_probe()
3591 if (base->irq < 0) { in d40_probe()
3592 ret = base->irq; in d40_probe()
3596 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); in d40_probe()
3602 if (base->plat_data->use_esram_lcla) { in d40_probe()
3604 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); in d40_probe()
3605 if (IS_ERR(base->lcpa_regulator)) { in d40_probe()
3607 ret = PTR_ERR(base->lcpa_regulator); in d40_probe()
3608 base->lcpa_regulator = NULL; in d40_probe()
3612 ret = regulator_enable(base->lcpa_regulator); in d40_probe()
3616 regulator_put(base->lcpa_regulator); in d40_probe()
3617 base->lcpa_regulator = NULL; in d40_probe()
3622 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); in d40_probe()
3624 pm_runtime_irq_safe(base->dev); in d40_probe()
3625 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); in d40_probe()
3626 pm_runtime_use_autosuspend(base->dev); in d40_probe()
3627 pm_runtime_mark_last_busy(base->dev); in d40_probe()
3628 pm_runtime_set_active(base->dev); in d40_probe()
3629 pm_runtime_enable(base->dev); in d40_probe()
3635 dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); in d40_probe()
3646 dev_info(base->dev, "initialized\n"); in d40_probe()
3650 if (base->lcla_pool.dma_addr) in d40_probe()
3651 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, in d40_probe()
3652 SZ_1K * base->num_phy_chans, in d40_probe()
3655 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) in d40_probe()
3656 free_pages((unsigned long)base->lcla_pool.base, in d40_probe()
3657 base->lcla_pool.pages); in d40_probe()
3659 kfree(base->lcla_pool.base_unaligned); in d40_probe()
3661 if (base->lcpa_regulator) { in d40_probe()
3662 regulator_disable(base->lcpa_regulator); in d40_probe()
3663 regulator_put(base->lcpa_regulator); in d40_probe()
3665 pm_runtime_disable(base->dev); in d40_probe()