Lines Matching +full:clk +full:- +full:csr

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Applied Micro X-Gene SoC DMA engine Driver
13 #include <linux/clk.h>
15 #include <linux/dma-mapping.h>
27 /* X-Gene DMA ring csr registers and bit definitions */
44 ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
46 ((m) &= (~BIT(31 - (v))))
77 /* X-Gene DMA device csr registers and bit definitions */
106 /* X-Gene SoC EFUSE csr register and bit definitions */
110 /* X-Gene DMA Descriptor format */
127 /* X-Gene DMA descriptor empty s/w signature */
130 /* X-Gene DMA configurable parameters defines */
148 /* X-Gene DMA descriptor error codes */
161 /* X-Gene DMA error interrupt codes */
175 /* X-Gene DMA flyby operation code */
181 /* X-Gene DMA SW descriptor flags */
184 /* Define to dump X-Gene DMA descriptor */
195 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
197 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
246 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
248 * @pdma: X-Gene DMA device structure reference
252 * @name: name of X-Gene DMA channel
290 * struct xgene_dma - internal representation of an X-Gene DMA device
292 * @clk: reference to this device's clock
300 * @chan: reference to X-Gene DMA channels
304 struct clk *clk; member
348 val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW); in is_pq_enabled()
381 *len -= nbytes; in xgene_dma_set_src_buffer()
389 return &desc->m1; in xgene_dma_lookup_ext8()
391 return &desc->m0; in xgene_dma_lookup_ext8()
393 return &desc->m3; in xgene_dma_lookup_ext8()
395 return &desc->m2; in xgene_dma_lookup_ext8()
406 desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT); in xgene_dma_init_desc()
407 desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA << in xgene_dma_init_desc()
409 desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT); in xgene_dma_init_desc()
410 desc->m3 |= cpu_to_le64((u64)dst_ring_num << in xgene_dma_init_desc()
424 desc1 = &desc_sw->desc1; in xgene_dma_prep_xor_desc()
425 desc2 = &desc_sw->desc2; in xgene_dma_prep_xor_desc()
428 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); in xgene_dma_prep_xor_desc()
431 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); in xgene_dma_prep_xor_desc()
432 desc1->m3 |= cpu_to_le64(*dst); in xgene_dma_prep_xor_desc()
435 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); in xgene_dma_prep_xor_desc()
438 desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt)); in xgene_dma_prep_xor_desc()
443 xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : in xgene_dma_prep_xor_desc()
444 xgene_dma_lookup_ext8(desc2, i - 1), in xgene_dma_prep_xor_desc()
446 desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); in xgene_dma_prep_xor_desc()
454 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; in xgene_dma_prep_xor_desc()
464 return -EINVAL; in xgene_dma_tx_submit()
466 chan = to_dma_chan(tx->chan); in xgene_dma_tx_submit()
469 spin_lock_bh(&chan->lock); in xgene_dma_tx_submit()
474 list_splice_tail_init(&desc->tx_list, &chan->ld_pending); in xgene_dma_tx_submit()
476 spin_unlock_bh(&chan->lock); in xgene_dma_tx_submit()
484 list_del(&desc->node); in xgene_dma_clean_descriptor()
486 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); in xgene_dma_clean_descriptor()
495 desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys); in xgene_dma_alloc_descriptor()
501 INIT_LIST_HEAD(&desc->tx_list); in xgene_dma_alloc_descriptor()
502 desc->tx.phys = phys; in xgene_dma_alloc_descriptor()
503 desc->tx.tx_submit = xgene_dma_tx_submit; in xgene_dma_alloc_descriptor()
504 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan); in xgene_dma_alloc_descriptor()
512 * xgene_dma_clean_completed_descriptor - free all descriptors which
514 * @chan: X-Gene DMA channel
523 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) { in xgene_dma_clean_completed_descriptor()
524 if (async_tx_test_ack(&desc->tx)) in xgene_dma_clean_completed_descriptor()
530 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
531 * @chan: X-Gene DMA channel
540 struct dma_async_tx_descriptor *tx = &desc->tx; in xgene_dma_run_tx_complete_actions()
549 if (tx->cookie == 0) in xgene_dma_run_tx_complete_actions()
563 * xgene_dma_clean_running_descriptor - move the completed descriptor from
565 * @chan: X-Gene DMA channel
575 list_del(&desc->node); in xgene_dma_clean_running_descriptor()
581 if (!async_tx_test_ack(&desc->tx)) { in xgene_dma_clean_running_descriptor()
586 list_add_tail(&desc->node, &chan->ld_completed); in xgene_dma_clean_running_descriptor()
591 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); in xgene_dma_clean_running_descriptor()
597 struct xgene_dma_ring *ring = &chan->tx_ring; in xgene_chan_xfer_request()
601 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
607 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
608 ring->head = 0; in xgene_chan_xfer_request()
611 memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw)); in xgene_chan_xfer_request()
617 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) { in xgene_chan_xfer_request()
618 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
620 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
621 ring->head = 0; in xgene_chan_xfer_request()
623 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); in xgene_chan_xfer_request()
627 chan->pending += ((desc_sw->flags & in xgene_chan_xfer_request()
631 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? in xgene_chan_xfer_request()
632 2 : 1, ring->cmd); in xgene_chan_xfer_request()
636 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
637 * @chan : X-Gene DMA channel
639 * LOCKING: must hold chan->lock
649 if (list_empty(&chan->ld_pending)) { in xgene_chan_xfer_ld_pending()
658 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) { in xgene_chan_xfer_ld_pending()
665 if (chan->pending >= chan->max_outstanding) in xgene_chan_xfer_ld_pending()
674 list_move_tail(&desc_sw->node, &chan->ld_running); in xgene_chan_xfer_ld_pending()
679 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
681 * @chan: X-Gene DMA channel
689 struct xgene_dma_ring *ring = &chan->rx_ring; in xgene_dma_cleanup_descriptors()
697 spin_lock(&chan->lock); in xgene_dma_cleanup_descriptors()
703 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) { in xgene_dma_cleanup_descriptors()
705 desc_hw = &ring->desc_hw[ring->head]; in xgene_dma_cleanup_descriptors()
708 if (unlikely(le64_to_cpu(desc_hw->m0) == in xgene_dma_cleanup_descriptors()
712 if (++ring->head == ring->slots) in xgene_dma_cleanup_descriptors()
713 ring->head = 0; in xgene_dma_cleanup_descriptors()
718 desc_hw->m0)), in xgene_dma_cleanup_descriptors()
720 desc_hw->m0))); in xgene_dma_cleanup_descriptors()
728 XGENE_DMA_DESC_DUMP(&desc_sw->desc1, in xgene_dma_cleanup_descriptors()
729 "X-Gene DMA TX DESC1: "); in xgene_dma_cleanup_descriptors()
731 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) in xgene_dma_cleanup_descriptors()
732 XGENE_DMA_DESC_DUMP(&desc_sw->desc2, in xgene_dma_cleanup_descriptors()
733 "X-Gene DMA TX DESC2: "); in xgene_dma_cleanup_descriptors()
736 "X-Gene DMA RX ERR DESC: "); in xgene_dma_cleanup_descriptors()
740 iowrite32(-1, ring->cmd); in xgene_dma_cleanup_descriptors()
743 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); in xgene_dma_cleanup_descriptors()
749 chan->pending -= ((desc_sw->flags & in xgene_dma_cleanup_descriptors()
756 list_move_tail(&desc_sw->node, &ld_completed); in xgene_dma_cleanup_descriptors()
766 spin_unlock(&chan->lock); in xgene_dma_cleanup_descriptors()
780 if (chan->desc_pool) in xgene_dma_alloc_chan_resources()
783 chan->desc_pool = dma_pool_create(chan->name, chan->dev, in xgene_dma_alloc_chan_resources()
786 if (!chan->desc_pool) { in xgene_dma_alloc_chan_resources()
788 return -ENOMEM; in xgene_dma_alloc_chan_resources()
797 * xgene_dma_free_desc_list - Free all descriptors in a queue
798 * @chan: X-Gene DMA channel
801 * LOCKING: must hold chan->lock
818 if (!chan->desc_pool) in xgene_dma_free_chan_resources()
824 spin_lock_bh(&chan->lock); in xgene_dma_free_chan_resources()
827 xgene_dma_free_desc_list(chan, &chan->ld_pending); in xgene_dma_free_chan_resources()
828 xgene_dma_free_desc_list(chan, &chan->ld_running); in xgene_dma_free_chan_resources()
829 xgene_dma_free_desc_list(chan, &chan->ld_completed); in xgene_dma_free_chan_resources()
831 spin_unlock_bh(&chan->lock); in xgene_dma_free_chan_resources()
834 dma_pool_destroy(chan->desc_pool); in xgene_dma_free_chan_resources()
835 chan->desc_pool = NULL; in xgene_dma_free_chan_resources()
865 new->tx.cookie = 0; in xgene_dma_prep_xor()
866 async_tx_ack(&new->tx); in xgene_dma_prep_xor()
869 list_add_tail(&new->node, &first->tx_list); in xgene_dma_prep_xor()
872 new->tx.flags = flags; /* client is in control of this ack */ in xgene_dma_prep_xor()
873 new->tx.cookie = -EBUSY; in xgene_dma_prep_xor()
874 list_splice(&first->tx_list, &new->tx_list); in xgene_dma_prep_xor()
876 return &new->tx; in xgene_dma_prep_xor()
882 xgene_dma_free_desc_list(chan, &first->tx_list); in xgene_dma_prep_xor()
923 new->tx.cookie = 0; in xgene_dma_prep_pq()
924 async_tx_ack(&new->tx); in xgene_dma_prep_pq()
927 list_add_tail(&new->node, &first->tx_list); in xgene_dma_prep_pq()
949 new->tx.flags = flags; /* client is in control of this ack */ in xgene_dma_prep_pq()
950 new->tx.cookie = -EBUSY; in xgene_dma_prep_pq()
951 list_splice(&first->tx_list, &new->tx_list); in xgene_dma_prep_pq()
953 return &new->tx; in xgene_dma_prep_pq()
959 xgene_dma_free_desc_list(chan, &first->tx_list); in xgene_dma_prep_pq()
967 spin_lock_bh(&chan->lock); in xgene_dma_issue_pending()
969 spin_unlock_bh(&chan->lock); in xgene_dma_issue_pending()
986 /* Re-enable DMA channel IRQ */ in xgene_dma_tasklet_cb()
987 enable_irq(chan->rx_irq); in xgene_dma_tasklet_cb()
1000 disable_irq_nosync(chan->rx_irq); in xgene_dma_chan_ring_isr()
1007 tasklet_schedule(&chan->tasklet); in xgene_dma_chan_ring_isr()
1018 val = ioread32(pdma->csr_dma + XGENE_DMA_INT); in xgene_dma_err_isr()
1021 iowrite32(val, pdma->csr_dma + XGENE_DMA_INT); in xgene_dma_err_isr()
1026 dev_err(pdma->dev, in xgene_dma_err_isr()
1036 iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE); in xgene_dma_wr_ring_state()
1039 iowrite32(ring->state[i], ring->pdma->csr_ring + in xgene_dma_wr_ring_state()
1045 memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG); in xgene_dma_clr_ring_state()
1051 void *ring_cfg = ring->state; in xgene_dma_setup_ring()
1052 u64 addr = ring->desc_paddr; in xgene_dma_setup_ring()
1055 ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; in xgene_dma_setup_ring()
1063 if (ring->owner == XGENE_DMA_RING_OWNER_DMA) { in xgene_dma_setup_ring()
1076 XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize); in xgene_dma_setup_ring()
1082 iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id), in xgene_dma_setup_ring()
1083 ring->pdma->csr_ring + XGENE_DMA_RING_ID); in xgene_dma_setup_ring()
1086 iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num), in xgene_dma_setup_ring()
1087 ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); in xgene_dma_setup_ring()
1089 if (ring->owner != XGENE_DMA_RING_OWNER_CPU) in xgene_dma_setup_ring()
1093 for (i = 0; i < ring->slots; i++) { in xgene_dma_setup_ring()
1096 desc = &ring->desc_hw[i]; in xgene_dma_setup_ring()
1097 desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); in xgene_dma_setup_ring()
1101 val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); in xgene_dma_setup_ring()
1102 XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num); in xgene_dma_setup_ring()
1103 iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); in xgene_dma_setup_ring()
1110 if (ring->owner == XGENE_DMA_RING_OWNER_CPU) { in xgene_dma_clear_ring()
1112 val = ioread32(ring->pdma->csr_ring + in xgene_dma_clear_ring()
1114 XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num); in xgene_dma_clear_ring()
1115 iowrite32(val, ring->pdma->csr_ring + in xgene_dma_clear_ring()
1120 ring_id = XGENE_DMA_RING_ID_SETUP(ring->id); in xgene_dma_clear_ring()
1121 iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID); in xgene_dma_clear_ring()
1123 iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); in xgene_dma_clear_ring()
1129 ring->cmd_base = ring->pdma->csr_ring_cmd + in xgene_dma_set_ring_cmd()
1130 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num - in xgene_dma_set_ring_cmd()
1133 ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET; in xgene_dma_set_ring_cmd()
1159 return -EINVAL; in xgene_dma_get_ring_size()
1170 /* De-allocate DMA ring descriptor */ in xgene_dma_delete_ring_one()
1171 if (ring->desc_vaddr) { in xgene_dma_delete_ring_one()
1172 dma_free_coherent(ring->pdma->dev, ring->size, in xgene_dma_delete_ring_one()
1173 ring->desc_vaddr, ring->desc_paddr); in xgene_dma_delete_ring_one()
1174 ring->desc_vaddr = NULL; in xgene_dma_delete_ring_one()
1180 xgene_dma_delete_ring_one(&chan->rx_ring); in xgene_dma_delete_chan_rings()
1181 xgene_dma_delete_ring_one(&chan->tx_ring); in xgene_dma_delete_chan_rings()
1191 ring->pdma = chan->pdma; in xgene_dma_create_ring_one()
1192 ring->cfgsize = cfgsize; in xgene_dma_create_ring_one()
1193 ring->num = chan->pdma->ring_num++; in xgene_dma_create_ring_one()
1194 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); in xgene_dma_create_ring_one()
1199 ring->size = ret; in xgene_dma_create_ring_one()
1202 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, in xgene_dma_create_ring_one()
1203 &ring->desc_paddr, GFP_KERNEL); in xgene_dma_create_ring_one()
1204 if (!ring->desc_vaddr) { in xgene_dma_create_ring_one()
1206 return -ENOMEM; in xgene_dma_create_ring_one()
1218 struct xgene_dma_ring *rx_ring = &chan->rx_ring; in xgene_dma_create_chan_rings()
1219 struct xgene_dma_ring *tx_ring = &chan->tx_ring; in xgene_dma_create_chan_rings()
1223 rx_ring->owner = XGENE_DMA_RING_OWNER_CPU; in xgene_dma_create_chan_rings()
1224 rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id; in xgene_dma_create_chan_rings()
1232 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr); in xgene_dma_create_chan_rings()
1235 tx_ring->owner = XGENE_DMA_RING_OWNER_DMA; in xgene_dma_create_chan_rings()
1236 tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id; in xgene_dma_create_chan_rings()
1245 tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num); in xgene_dma_create_chan_rings()
1249 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); in xgene_dma_create_chan_rings()
1252 chan->max_outstanding = tx_ring->slots; in xgene_dma_create_chan_rings()
1262 ret = xgene_dma_create_chan_rings(&pdma->chan[i]); in xgene_dma_init_rings()
1265 xgene_dma_delete_chan_rings(&pdma->chan[j]); in xgene_dma_init_rings()
1278 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_enable()
1281 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_enable()
1288 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_disable()
1290 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_disable()
1300 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); in xgene_dma_mask_interrupts()
1302 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); in xgene_dma_mask_interrupts()
1304 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); in xgene_dma_mask_interrupts()
1306 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); in xgene_dma_mask_interrupts()
1308 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); in xgene_dma_mask_interrupts()
1311 iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK); in xgene_dma_mask_interrupts()
1321 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); in xgene_dma_unmask_interrupts()
1323 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); in xgene_dma_unmask_interrupts()
1325 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); in xgene_dma_unmask_interrupts()
1327 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); in xgene_dma_unmask_interrupts()
1329 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); in xgene_dma_unmask_interrupts()
1333 pdma->csr_dma + XGENE_DMA_INT_MASK); in xgene_dma_unmask_interrupts()
1342 pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC); in xgene_dma_init_hw()
1347 pdma->csr_dma + XGENE_DMA_RAID6_CONT); in xgene_dma_init_hw()
1349 dev_info(pdma->dev, "PQ is disabled in HW\n"); in xgene_dma_init_hw()
1355 val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR); in xgene_dma_init_hw()
1358 dev_info(pdma->dev, in xgene_dma_init_hw()
1359 "X-Gene DMA v%d.%02d.%02d driver registered %d channels", in xgene_dma_init_hw()
1366 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) && in xgene_dma_init_ring_mngr()
1367 (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST))) in xgene_dma_init_ring_mngr()
1370 iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN); in xgene_dma_init_ring_mngr()
1371 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST); in xgene_dma_init_ring_mngr()
1374 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); in xgene_dma_init_ring_mngr()
1377 ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); in xgene_dma_init_ring_mngr()
1382 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY) in xgene_dma_init_ring_mngr()
1384 dev_err(pdma->dev, in xgene_dma_init_ring_mngr()
1386 return -ENODEV; in xgene_dma_init_ring_mngr()
1391 pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1); in xgene_dma_init_ring_mngr()
1393 pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1); in xgene_dma_init_ring_mngr()
1395 pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS); in xgene_dma_init_ring_mngr()
1399 pdma->csr_ring + XGENE_DMA_RING_CONFIG); in xgene_dma_init_ring_mngr()
1413 iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); in xgene_dma_init_mem()
1416 ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); in xgene_dma_init_mem()
1421 if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY) in xgene_dma_init_mem()
1423 dev_err(pdma->dev, in xgene_dma_init_mem()
1425 return -ENODEV; in xgene_dma_init_mem()
1437 ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr, in xgene_dma_request_irqs()
1440 dev_err(pdma->dev, in xgene_dma_request_irqs()
1441 "Failed to register error IRQ %d\n", pdma->err_irq); in xgene_dma_request_irqs()
1447 chan = &pdma->chan[i]; in xgene_dma_request_irqs()
1448 irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_request_irqs()
1449 ret = devm_request_irq(chan->dev, chan->rx_irq, in xgene_dma_request_irqs()
1451 0, chan->name, chan); in xgene_dma_request_irqs()
1454 chan->rx_irq); in xgene_dma_request_irqs()
1455 devm_free_irq(pdma->dev, pdma->err_irq, pdma); in xgene_dma_request_irqs()
1458 chan = &pdma->chan[i]; in xgene_dma_request_irqs()
1459 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_request_irqs()
1460 devm_free_irq(chan->dev, chan->rx_irq, chan); in xgene_dma_request_irqs()
1476 devm_free_irq(pdma->dev, pdma->err_irq, pdma); in xgene_dma_free_irqs()
1479 chan = &pdma->chan[i]; in xgene_dma_free_irqs()
1480 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_free_irqs()
1481 devm_free_irq(chan->dev, chan->rx_irq, chan); in xgene_dma_free_irqs()
1489 dma_cap_zero(dma_dev->cap_mask); in xgene_dma_set_caps()
1493 /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR in xgene_dma_set_caps()
1502 if ((chan->id == XGENE_DMA_PQ_CHANNEL) && in xgene_dma_set_caps()
1503 is_pq_enabled(chan->pdma)) { in xgene_dma_set_caps()
1504 dma_cap_set(DMA_PQ, dma_dev->cap_mask); in xgene_dma_set_caps()
1505 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in xgene_dma_set_caps()
1506 } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) && in xgene_dma_set_caps()
1507 !is_pq_enabled(chan->pdma)) { in xgene_dma_set_caps()
1508 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in xgene_dma_set_caps()
1512 dma_dev->dev = chan->dev; in xgene_dma_set_caps()
1513 dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; in xgene_dma_set_caps()
1514 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; in xgene_dma_set_caps()
1515 dma_dev->device_issue_pending = xgene_dma_issue_pending; in xgene_dma_set_caps()
1516 dma_dev->device_tx_status = xgene_dma_tx_status; in xgene_dma_set_caps()
1518 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in xgene_dma_set_caps()
1519 dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; in xgene_dma_set_caps()
1520 dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; in xgene_dma_set_caps()
1521 dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES; in xgene_dma_set_caps()
1524 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { in xgene_dma_set_caps()
1525 dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; in xgene_dma_set_caps()
1526 dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; in xgene_dma_set_caps()
1527 dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES; in xgene_dma_set_caps()
1533 struct xgene_dma_chan *chan = &pdma->chan[id]; in xgene_dma_async_register()
1534 struct dma_device *dma_dev = &pdma->dma_dev[id]; in xgene_dma_async_register()
1537 chan->dma_chan.device = dma_dev; in xgene_dma_async_register()
1539 spin_lock_init(&chan->lock); in xgene_dma_async_register()
1540 INIT_LIST_HEAD(&chan->ld_pending); in xgene_dma_async_register()
1541 INIT_LIST_HEAD(&chan->ld_running); in xgene_dma_async_register()
1542 INIT_LIST_HEAD(&chan->ld_completed); in xgene_dma_async_register()
1543 tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb); in xgene_dma_async_register()
1545 chan->pending = 0; in xgene_dma_async_register()
1546 chan->desc_pool = NULL; in xgene_dma_async_register()
1547 dma_cookie_init(&chan->dma_chan); in xgene_dma_async_register()
1553 INIT_LIST_HEAD(&dma_dev->channels); in xgene_dma_async_register()
1554 list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels); in xgene_dma_async_register()
1560 tasklet_kill(&chan->tasklet); in xgene_dma_async_register()
1566 dev_info(pdma->dev, in xgene_dma_async_register()
1567 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan), in xgene_dma_async_register()
1568 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", in xgene_dma_async_register()
1569 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); in xgene_dma_async_register()
1582 dma_async_device_unregister(&pdma->dma_dev[j]); in xgene_dma_init_async()
1583 tasklet_kill(&pdma->chan[j].tasklet); in xgene_dma_init_async()
1598 dma_async_device_unregister(&pdma->dma_dev[i]); in xgene_dma_async_unregister()
1606 pdma->ring_num = XGENE_DMA_RING_NUM; in xgene_dma_init_channels()
1609 chan = &pdma->chan[i]; in xgene_dma_init_channels()
1610 chan->dev = pdma->dev; in xgene_dma_init_channels()
1611 chan->pdma = pdma; in xgene_dma_init_channels()
1612 chan->id = i; in xgene_dma_init_channels()
1613 snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id); in xgene_dma_init_channels()
1623 /* Get DMA csr region */ in xgene_dma_get_resources()
1626 dev_err(&pdev->dev, "Failed to get csr region\n"); in xgene_dma_get_resources()
1627 return -ENXIO; in xgene_dma_get_resources()
1630 pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1632 if (!pdma->csr_dma) { in xgene_dma_get_resources()
1633 dev_err(&pdev->dev, "Failed to ioremap csr region"); in xgene_dma_get_resources()
1634 return -ENOMEM; in xgene_dma_get_resources()
1637 /* Get DMA ring csr region */ in xgene_dma_get_resources()
1640 dev_err(&pdev->dev, "Failed to get ring csr region\n"); in xgene_dma_get_resources()
1641 return -ENXIO; in xgene_dma_get_resources()
1644 pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1646 if (!pdma->csr_ring) { in xgene_dma_get_resources()
1647 dev_err(&pdev->dev, "Failed to ioremap ring csr region"); in xgene_dma_get_resources()
1648 return -ENOMEM; in xgene_dma_get_resources()
1651 /* Get DMA ring cmd csr region */ in xgene_dma_get_resources()
1654 dev_err(&pdev->dev, "Failed to get ring cmd csr region\n"); in xgene_dma_get_resources()
1655 return -ENXIO; in xgene_dma_get_resources()
1658 pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1660 if (!pdma->csr_ring_cmd) { in xgene_dma_get_resources()
1661 dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region"); in xgene_dma_get_resources()
1662 return -ENOMEM; in xgene_dma_get_resources()
1665 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; in xgene_dma_get_resources()
1667 /* Get efuse csr region */ in xgene_dma_get_resources()
1670 dev_err(&pdev->dev, "Failed to get efuse csr region\n"); in xgene_dma_get_resources()
1671 return -ENXIO; in xgene_dma_get_resources()
1674 pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1676 if (!pdma->csr_efuse) { in xgene_dma_get_resources()
1677 dev_err(&pdev->dev, "Failed to ioremap efuse csr region"); in xgene_dma_get_resources()
1678 return -ENOMEM; in xgene_dma_get_resources()
1684 return -ENXIO; in xgene_dma_get_resources()
1686 pdma->err_irq = irq; in xgene_dma_get_resources()
1692 return -ENXIO; in xgene_dma_get_resources()
1694 pdma->chan[i - 1].rx_irq = irq; in xgene_dma_get_resources()
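
For context, the matches above from xgene_dma_get_resources() follow the usual platform-device CSR-mapping pattern: look up a MEM resource, then ioremap it with a devm_* call so the mapping is released automatically on driver detach. The helper below is a minimal sketch of that pattern only; the function name map_csr and its parameters are illustrative, not taken from the driver.

	#include <linux/platform_device.h>
	#include <linux/io.h>

	/* Sketch: map one CSR region of a platform device by resource index */
	static void __iomem *map_csr(struct platform_device *pdev, unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		if (!res)
			return NULL;

		/* devm_ioremap() returns NULL on failure; unmapped automatically on detach */
		return devm_ioremap(&pdev->dev, res->start, resource_size(res));
	}
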
1705 pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL); in xgene_dma_probe()
1707 return -ENOMEM; in xgene_dma_probe()
1709 pdma->dev = &pdev->dev; in xgene_dma_probe()
1716 pdma->clk = devm_clk_get(&pdev->dev, NULL); in xgene_dma_probe()
1717 if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) { in xgene_dma_probe()
1718 dev_err(&pdev->dev, "Failed to get clk\n"); in xgene_dma_probe()
1719 return PTR_ERR(pdma->clk); in xgene_dma_probe()
1722 /* Enable clk before accessing registers */ in xgene_dma_probe()
1723 if (!IS_ERR(pdma->clk)) { in xgene_dma_probe()
1724 ret = clk_prepare_enable(pdma->clk); in xgene_dma_probe()
1726 dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); in xgene_dma_probe()
1736 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42)); in xgene_dma_probe()
1738 dev_err(&pdev->dev, "No usable DMA configuration\n"); in xgene_dma_probe()
1769 xgene_dma_delete_chan_rings(&pdma->chan[i]); in xgene_dma_probe()
1773 if (!IS_ERR(pdma->clk)) in xgene_dma_probe()
1774 clk_disable_unprepare(pdma->clk); in xgene_dma_probe()
1793 chan = &pdma->chan[i]; in xgene_dma_remove()
1794 tasklet_kill(&chan->tasklet); in xgene_dma_remove()
1798 if (!IS_ERR(pdma->clk)) in xgene_dma_remove()
1799 clk_disable_unprepare(pdma->clk); in xgene_dma_remove()
1811 {.compatible = "apm,xgene-storm-dma",},
1820 .name = "X-Gene-DMA",
1828 MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
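
The clk matches above, from xgene_dma_probe() and xgene_dma_remove(), show the optional-clock idiom: on device-tree systems the clock is required, while on ACPI systems a missing clock is tolerated and the enable/disable calls are skipped. The sketch below illustrates that idiom under those assumptions; example_enable_clk and the normalisation of a missing clock to NULL are illustrative choices, not the driver's exact structure (the driver keeps the IS_ERR value and re-tests it before disabling).

	#include <linux/acpi.h>
	#include <linux/clk.h>
	#include <linux/platform_device.h>

	/* Sketch: get and enable an optional clock; *out is NULL if absent (ACPI) */
	static int example_enable_clk(struct platform_device *pdev, struct clk **out)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk)) {
			/* Only ACPI-probed devices may run without an explicit clock */
			if (!ACPI_COMPANION(&pdev->dev))
				return PTR_ERR(clk);
			clk = NULL;
		}

		if (clk) {
			ret = clk_prepare_enable(clk);
			if (ret)
				return ret;
		}

		*out = clk;	/* call clk_disable_unprepare(*out) on remove if non-NULL */
		return 0;
	}
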