Excerpt from the Marvell MMP peripheral DMA controller driver (drivers/dma/mmp_pdma.c): lines matching "phy" and "device". Elided code is marked with "...".

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-mapping.h>
#include <linux/device.h>

#define DCSR_NODESC	BIT(30)		/* No-Descriptor Fetch (read / write) */
#define DCSR_REQPEND	BIT(8)		/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)		/* Stop State (read-only) */

#define DCMD_ENDIAN	BIT(18)		/* Device Endian-ness. */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
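A single hardware descriptor can move at most DCMD_LENGTH (0x1fff = 8191) bytes, which is why the prep routines below chain several descriptors for larger requests. A minimal standalone sketch of that arithmetic; the 64 KiB request size is just an example, not something from the driver:

#include <stdio.h>

#define PDMA_MAX_DESC_BYTES 0x1fff	/* mirrors DCMD_LENGTH above */

int main(void)
{
	unsigned long len = 64 * 1024;	/* example request: 64 KiB */
	unsigned long descs = (len + PDMA_MAX_DESC_BYTES - 1) / PDMA_MAX_DESC_BYTES;

	printf("%lu bytes -> %lu descriptors\n", len, descs);	/* 65536 bytes -> 9 descriptors */
	return 0;
}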
/* per-virtual-channel state (struct mmp_pdma_chan), other fields elided */
	struct device *dev;
	struct mmp_pdma_phy *phy;

/* per-controller state (struct mmp_pdma_device), other fields elided */
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */

#define to_mmp_pdma_dev(dmadev) \
	container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* acknowledge the channel interrupt by writing DCSR back */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}
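The helpers above address per-channel registers by scaling the channel index: DCSR-type registers are 4 bytes apart (idx << 2) and the descriptor-address registers 16 bytes apart (idx << 4). A small standalone sketch of that offset math; the DCSR/DDADR base offsets used here are placeholders, since the excerpt does not show their real values:

#include <stdio.h>
#include <stdint.h>

#define DCSR_BASE	0x0000u		/* placeholder base for the control/status block */
#define DDADR_BASE	0x0200u		/* placeholder base for the descriptor-address block */

static uint32_t dcsr_off(unsigned int idx)  { return (idx << 2) + DCSR_BASE; }	/* stride 4  */
static uint32_t ddadr_off(unsigned int idx) { return (idx << 4) + DDADR_BASE; }	/* stride 16 */

int main(void)
{
	for (unsigned int idx = 0; idx < 4; idx++)
		printf("ch%u: DCSR @ 0x%03x, DDADR @ 0x%03x\n",
		       idx, dcsr_off(idx), ddadr_off(idx));
	return 0;
}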
/* mmp_pdma_chan_handler(): per-channel interrupt handler */
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;

/* mmp_pdma_int_handler(): shared IRQ; walk DINT and dispatch each pending channel */
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	...
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
	...
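In the shared-IRQ case the handler loops over DINT, clearing the lowest set bit each iteration with dint & (dint - 1) so every pending channel is visited exactly once. The __ffs()-style extraction of the bit index is not part of the excerpt, so this standalone sketch uses POSIX ffs() instead; the sample DINT value is made up:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int dint = 0x00050012;	/* example: channels 1, 4, 16 and 18 pending */

	while (dint) {
		int i = ffs(dint) - 1;	/* index of the lowest pending channel */

		printf("servicing channel %d\n", i);
		dint &= dint - 1;	/* clear that bit and continue */
	}
	return 0;
}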
/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */
	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			...
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				...
			}
		}
	}
	...
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}
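The comparison that enforces the priority table above is elided from the excerpt, but the mapping itself follows directly from the comment: a channel's priority is its position within its bank of 16, divided by 4. A standalone sketch that prints the mapping:

#include <stdio.h>

int main(void)
{
	for (int ch = 0; ch < 32; ch++)
		printf("ch %2d -> priority %d\n", ch, (ch & 0xf) >> 2);
	return 0;
}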
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	...
	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/*
 * start_pending_queue - transfer any pending transactions
 */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/* pending -> running */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	dma_cookie_t cookie = -EBUSY;
	...
	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}
	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	return cookie;
}
/* mmp_pdma_alloc_descriptor(): take one hw descriptor from the channel's DMA pool */
	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;
/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  ...);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}
	...
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
/* mmp_pdma_free_desc_list(): return every descriptor on @list to the DMA pool */
	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

/* mmp_pdma_free_chan_resources() */
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
/* mmp_pdma_prep_memcpy(): build a chained descriptor list for a copy */
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	/* loop: one descriptor per chunk of at most DCMD_LENGTH bytes */
	do {
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}
		...
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;
		...
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		...
		len -= copy;
		if (chan->dir == DMA_MEM_TO_DEV) {
			...
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			...
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			...
		}
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;
	return &first->async_tx;

fail:
	mmp_pdma_free_desc_list(chan, &first->tx_list);
/* mmp_pdma_prep_slave_sg(): map a scatterlist onto a descriptor chain */
	chan->byte_align = false;

	mmp_pdma_config_write(dchan, &chan->slave_config, dir);

	/* for each sg entry, emit descriptors of at most DCMD_LENGTH bytes */
	for_each_sg(sgl, sg, sg_len, i) {
		...
			chan->byte_align = true;
		...
			dev_err(chan->dev, "no memory for desc\n");
		...
		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
		if (dir == DMA_MEM_TO_DEV) {
			new->desc.dsadr = addr;
			new->desc.dtadr = chan->dev_addr;
		} else {
			new->desc.dsadr = chan->dev_addr;
			new->desc.dtadr = addr;
		}
		...
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		...
		list_add_tail(&new->node, &first->tx_list);
		...
		avail -= len;
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;
	return &first->async_tx;

fail:
	mmp_pdma_free_desc_list(chan, &first->tx_list);
/* mmp_pdma_prep_dma_cyclic(): ring of descriptors, one interrupt per period */
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	/* one descriptor per period, each with DCMD_ENDIRQEN set */
	do {
		...
			dev_err(chan->dev, "no memory for desc\n");
		...
		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;
		...
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		...
		len -= period_len;
		if (chan->dir == DMA_MEM_TO_DEV)
			...
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the ring: the last descriptor links back to the first */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;
	return &first->async_tx;

fail:
	mmp_pdma_free_desc_list(chan, &first->tx_list);
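For reference, this is roughly how a client would reach the cyclic path above through the generic dmaengine API. It is a hypothetical sketch: the function name, the callback and the buffer parameters are placeholders, not part of this driver.

#include <linux/dmaengine.h>

/* hypothetical consumer helper, e.g. for an audio ring buffer */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* invoked once per period from the tasklet */
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}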
/* mmp_pdma_config_write(): translate a dma_slave_config into DCMD bits */
	if (!dchan)
		return -EINVAL;

	if (direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = direction;
	chan->dev_addr = addr;

/* mmp_pdma_config(): cache the client's config; it is applied at prep time */
	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
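mmp_pdma_config() only caches the dma_slave_config; mmp_pdma_config_write() turns it into DCMD bits when a transfer is prepared. A hypothetical client-side sketch showing which fields matter for a mem-to-dev channel; the FIFO address, the channel name "tx" and the helper name are placeholders, not taken from this driver:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_setup_tx_dma(struct device *dev, dma_addr_t fifo_addr,
				struct dma_chan **out)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,			/* becomes chan->dev_addr */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* -> DCMD_WIDTH4 */
		.dst_maxburst	= 32,				/* -> DCMD_BURST32 */
	};
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	*out = chan;
	return dmaengine_slave_config(chan, &cfg);
}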
/* mmp_pdma_terminate_all(): stop the channel and discard all queued descriptors */
	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	...
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;
/* mmp_pdma_residue(): bytes still outstanding for the transaction @cookie */
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed, so its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;
		len = sw->desc.dcmd & DCMD_LENGTH;
		...
			residue += end - curr;
		...
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;
		if (sw->async_tx.cookie == cookie)
			return residue;
	}
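The residue walk above boils down to simple address arithmetic: the descriptor the hardware is currently inside contributes (start + len) - current_address, and every descriptor after it contributes its full length. A standalone sketch with made-up numbers:

#include <stdio.h>

struct desc { unsigned int start, len; };

int main(void)
{
	struct desc chain[] = { { 0x1000, 0x2000 }, { 0x3000, 0x2000 } };
	unsigned int curr = 0x1800;	/* pretend DSADR/DTADR currently reads this */
	unsigned int residue = 0;
	int passed = 0;

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int end = chain[i].start + chain[i].len;

		if (passed) {
			residue += chain[i].len;	/* not started yet */
		} else if (curr >= chain[i].start && curr <= end) {
			residue += end - curr;		/* partially done */
			passed = 1;
		}
	}
	printf("residue = 0x%x bytes\n", residue);	/* 0x1800 + 0x2000 = 0x3800 */
	return 0;
}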
/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 */
	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
/* dma_do_tasklet(): completion bottom half; runs callbacks outside hard-irq context */
	if (chan->cyclic_first) {
		/* cyclic: only invoke the period callback, nothing is freed */
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		...
		return;
	}

	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/* move to a temporary list so the lock can be dropped for cleanup */
		list_move(&desc->node, &chain_cleanup);

		/* the first ENDIRQEN descriptor ends the completed transaction */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	chan->idle = list_empty(&chan->chain_running);
	...
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		list_del(&desc->node);
		...
		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
/* mmp_pdma_remove() */
	struct mmp_pdma_phy *phy;

	if (op->dev.of_node)
		of_dma_controller_free(op->dev.of_node);

	/* count per-channel IRQs to tell the shared-IRQ case apart */
	for (i = 0; i < pdev->dma_channels; i++) {
		...
	}
	if (irq_num != pdev->dma_channels) {
		devm_free_irq(&op->dev, irq, pdev);	/* one shared IRQ */
	} else {
		for (i = 0; i < pdev->dma_channels; i++) {
			phy = &pdev->phy[i];
			devm_free_irq(&op->dev, irq, phy);	/* one IRQ per channel */
		}
	}

	dma_async_device_unregister(&pdev->device);
/* mmp_pdma_chan_init(): set up one physical channel and its virtual channel */
	struct mmp_pdma_phy *phy = &pdev->phy[idx];

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register the virtual channel with the dmaengine core */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
	{ .compatible = "marvell,pdma-1.0", },	/* OF match table entry */

/* mmp_pdma_dma_xlate(): the single DT dma-cell selects the DRCMR request line */
	struct mmp_pdma_device *d = ofdma->of_dma_data;

	chan = dma_get_any_slave_channel(&d->device);
	...
	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
/* mmp_pdma_probe() */
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	...
	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	if (pdev->dev->of_node) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
					     &dma_channels);
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
	} else {
		...
	}
	pdev->dma_channels = dma_channels;
	...
	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);
	...
		/* all channels share one IRQ, demultiplexed by mmp_pdma_int_handler() */
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
	...
	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			dma_async_device_unregister(&pdev->device);
			return ret;
		}
	}
	...
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
	{ "mmp-pdma", },		/* platform_device_id table entry */

	.name = "mmp-pdma",		/* platform_driver name */