Lines Matching "num-tx-queues"

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * DMA driver for AMD Queue-based DMA Subsystem
5 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
12 #include <linux/dma-map-ops.h>
19 #define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
20 #define QDMA_REG_OFF(d, r) ((d)->roffs[r].off)
43 idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx; in qdma_get_intr_ring_idx()
44 qdev->qintr_ring_idx %= qdev->qintr_ring_num; in qdma_get_intr_ring_idx()
52 const struct qdma_reg_field *f = &qdev->rfields[field]; in qdma_get_field()
56 low_pos = f->lsb / BITS_PER_TYPE(*data); in qdma_get_field()
57 hi_pos = f->msb / BITS_PER_TYPE(*data); in qdma_get_field()
60 low_bit = f->lsb % BITS_PER_TYPE(*data); in qdma_get_field()
61 hi_bit = f->msb % BITS_PER_TYPE(*data); in qdma_get_field()
65 low_bit = f->lsb % BITS_PER_TYPE(*data); in qdma_get_field()
66 hi_bit = low_bit + (f->msb - f->lsb); in qdma_get_field()
72 hi_bit = f->msb % BITS_PER_TYPE(*data); in qdma_get_field()
75 low_bit = f->msb - f->lsb - hi_bit; in qdma_get_field()
77 low_bit -= 32; in qdma_get_field()
78 value |= (u64)data[hi_pos - 1] << low_bit; in qdma_get_field()
79 mask = GENMASK(31, 32 - low_bit); in qdma_get_field()
80 value |= (data[hi_pos - 2] & mask) >> low_bit; in qdma_get_field()
89 const struct qdma_reg_field *f = &qdev->rfields[field]; in qdma_set_field()
92 low_pos = f->lsb / BITS_PER_TYPE(*data); in qdma_set_field()
93 hi_pos = f->msb / BITS_PER_TYPE(*data); in qdma_set_field()
94 low_bit = f->lsb % BITS_PER_TYPE(*data); in qdma_set_field()
98 data[low_pos++] |= (u32)(value >> (32 - low_bit)); in qdma_set_field()
100 data[low_pos] |= (u32)(value >> (64 - low_bit)); in qdma_set_field()
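
The qdma_get_field()/qdma_set_field() lines above pack and unpack register fields, described by msb/lsb bit positions, into an array of 32-bit context words, with extra handling when a field straddles a word boundary. Below is a minimal standalone sketch of the same idea, assuming fields span at most two words (the driver also handles wider fields); the field layout used in main() is made up for illustration.

/*
 * Standalone userspace sketch, not the driver's code: extract a field
 * described by msb/lsb bit positions from an array of 32-bit words,
 * including the case where the field straddles a word boundary.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t get_field(const uint32_t *data, unsigned int msb,
                          unsigned int lsb)
{
        unsigned int lo_word = lsb / 32, hi_word = msb / 32;
        unsigned int lo_bit = lsb % 32, hi_bit = msb % 32;
        uint64_t value;

        if (lo_word == hi_word)         /* field fits in one word */
                return (data[lo_word] >> lo_bit) &
                       ((1ULL << (msb - lsb + 1)) - 1);

        /* low bits come from the top of the low word ... */
        value = data[lo_word] >> lo_bit;
        /* ... the rest from the bottom of the high word */
        value |= (uint64_t)(data[hi_word] & ((1ULL << (hi_bit + 1)) - 1))
                 << (32 - lo_bit);
        return value;
}

int main(void)
{
        /* two context words; a field at bits [47:16] spans both */
        uint32_t ctxt[2] = { 0xABCD0000u, 0x00001234u };

        printf("field = 0x%" PRIx64 "\n", get_field(ctxt, 47, 16));
        /* prints field = 0x1234abcd */
        return 0;
}
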
106 const struct qdma_reg *r = &qdev->roffs[reg]; in qdma_reg_write()
109 if (r->count > 1) in qdma_reg_write()
110 ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count); in qdma_reg_write()
112 ret = regmap_write(qdev->regmap, r->off, *data); in qdma_reg_write()
120 const struct qdma_reg *r = &qdev->roffs[reg]; in qdma_reg_read()
123 if (r->count > 1) in qdma_reg_read()
124 ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count); in qdma_reg_read()
126 ret = regmap_read(qdev->regmap, r->off, data); in qdma_reg_read()
146 ret = regmap_read_poll_timeout(qdev->regmap, in qdma_context_cmd_execute()
185 qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base); in qdma_prep_sw_desc_context()
186 qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec); in qdma_prep_sw_desc_context()
187 qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid); in qdma_prep_sw_desc_context()
207 qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base); in qdma_prep_intr_context()
208 qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec); in qdma_prep_intr_context()
209 qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size); in qdma_prep_intr_context()
210 qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid); in qdma_prep_intr_context()
211 qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color); in qdma_prep_intr_context()
212 qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid); in qdma_prep_intr_context()
220 qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase); in qdma_prep_fmap_context()
221 qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax); in qdma_prep_fmap_context()
237 mutex_lock(&qdev->ctxt_lock); in qdma_prog_context()
255 mutex_unlock(&qdev->ctxt_lock); in qdma_prog_context()
278 return -EBUSY; in qdma_check_queue_status()
294 struct qdma_device *qdev = queue->qdev; in qdma_clear_queue_context()
296 int ret, num, i; in qdma_clear_queue_context() local
298 if (queue->dir == DMA_MEM_TO_DEV) { in qdma_clear_queue_context()
300 num = ARRAY_SIZE(h2c_types); in qdma_clear_queue_context()
303 num = ARRAY_SIZE(c2h_types); in qdma_clear_queue_context()
305 for (i = 0; i < num; i++) { in qdma_clear_queue_context()
307 queue->qid, NULL); in qdma_clear_queue_context()
324 qdev->fid, NULL); in qdma_setup_fmap_context()
331 fmap.qmax = qdev->chan_num * 2; in qdma_setup_fmap_context()
334 qdev->fid, ctxt); in qdma_setup_fmap_context()
364 * Enable or disable memory-mapped DMA engines
379 struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev); in qdma_get_hw_info()
388 if (pdata->max_mm_channels * 2 > value) { in qdma_get_hw_info()
389 qdma_err(qdev, "not enough hw queues %d", value); in qdma_get_hw_info()
390 return -EINVAL; in qdma_get_hw_info()
392 qdev->chan_num = pdata->max_mm_channels; in qdma_get_hw_info()
394 ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID); in qdma_get_hw_info()
399 qdev->chan_num, qdev->fid); in qdma_get_hw_info()
406 struct qdma_device *qdev = queue->qdev; in qdma_update_pidx()
408 return regmap_write(qdev->regmap, queue->pidx_reg, in qdma_update_pidx()
415 struct qdma_device *qdev = queue->qdev; in qdma_update_cidx()
417 return regmap_write(qdev->regmap, queue->cidx_reg, in qdma_update_cidx()
422 * qdma_free_vdesc - Free descriptor
435 struct qdma_queue *q, **queues; in qdma_alloc_queues() local
440 queues = &qdev->h2c_queues; in qdma_alloc_queues()
443 queues = &qdev->c2h_queues; in qdma_alloc_queues()
447 *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q), in qdma_alloc_queues()
449 if (!*queues) in qdma_alloc_queues()
450 return -ENOMEM; in qdma_alloc_queues()
452 for (i = 0; i < qdev->chan_num; i++) { in qdma_alloc_queues()
457 q = &(*queues)[i]; in qdma_alloc_queues()
458 q->ring_size = QDMA_DEFAULT_RING_SIZE; in qdma_alloc_queues()
459 q->idx_mask = q->ring_size - 2; in qdma_alloc_queues()
460 q->qdev = qdev; in qdma_alloc_queues()
461 q->dir = dir; in qdma_alloc_queues()
462 q->qid = i; in qdma_alloc_queues()
463 q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE; in qdma_alloc_queues()
464 q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) + in qdma_alloc_queues()
466 q->vchan.desc_free = qdma_free_vdesc; in qdma_alloc_queues()
467 vchan_init(&q->vchan, &qdev->dma_dev); in qdma_alloc_queues()
478 ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value); in qdma_device_verify()
485 return -ENODEV; in qdma_device_verify()
487 qdev->rfields = qdma_regfs_default; in qdma_device_verify()
488 qdev->roffs = qdma_regos_default; in qdma_device_verify()
495 struct device *dev = &qdev->pdev->dev; in qdma_device_setup()
500 dev = dev->parent; in qdma_device_setup()
503 return -EINVAL; in qdma_device_setup()
505 set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev)); in qdma_device_setup()
521 /* Enable memory-mapped DMA engine in both directions */ in qdma_device_setup()
530 qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret); in qdma_device_setup()
536 qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret); in qdma_device_setup()
544 * qdma_free_queue_resources() - Free queue resources
550 struct qdma_device *qdev = queue->qdev; in qdma_free_queue_resources()
551 struct device *dev = qdev->dma_dev.dev; in qdma_free_queue_resources()
554 vchan_free_chan_resources(&queue->vchan); in qdma_free_queue_resources()
555 dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE, in qdma_free_queue_resources()
556 queue->desc_base, queue->dma_desc_base); in qdma_free_queue_resources()
560 * qdma_alloc_queue_resources() - Allocate queue resources
566 struct qdma_device *qdev = queue->qdev; in qdma_alloc_queue_resources()
575 size = queue->ring_size * QDMA_MM_DESC_SIZE; in qdma_alloc_queue_resources()
576 queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size, in qdma_alloc_queue_resources()
577 &queue->dma_desc_base, in qdma_alloc_queue_resources()
579 if (!queue->desc_base) { in qdma_alloc_queue_resources()
581 return -ENOMEM; in qdma_alloc_queue_resources()
586 desc.desc_base = queue->dma_desc_base; in qdma_alloc_queue_resources()
587 ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid); in qdma_alloc_queue_resources()
590 chan->name); in qdma_alloc_queue_resources()
591 dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base, in qdma_alloc_queue_resources()
592 queue->dma_desc_base); in qdma_alloc_queue_resources()
596 queue->pidx = 0; in qdma_alloc_queue_resources()
597 queue->cidx = 0; in qdma_alloc_queue_resources()
607 return info->dir == queue->dir; in qdma_filter_fn()
612 struct qdma_device *qdev = queue->qdev; in qdma_xfer_start()
615 if (!vchan_next_desc(&queue->vchan)) in qdma_xfer_start()
619 queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid); in qdma_xfer_start()
621 ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx); in qdma_xfer_start()
624 queue->pidx, CHAN_STR(queue), queue->qid); in qdma_xfer_start()
635 spin_lock_irqsave(&queue->vchan.lock, flags); in qdma_issue_pending()
636 if (vchan_issue_pending(&queue->vchan)) { in qdma_issue_pending()
637 if (queue->submitted_vdesc) { in qdma_issue_pending()
638 queue->issued_vdesc = queue->submitted_vdesc; in qdma_issue_pending()
639 queue->submitted_vdesc = NULL; in qdma_issue_pending()
644 spin_unlock_irqrestore(&queue->vchan.lock, flags); in qdma_issue_pending()
651 if (((q->pidx + 1) & q->idx_mask) == q->cidx) in qdma_get_desc()
654 desc = q->desc_base + q->pidx; in qdma_get_desc()
655 q->pidx = (q->pidx + 1) & q->idx_mask; in qdma_get_desc()
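
qdma_get_desc() above hands out the next free ring slot: the queue counts as full when advancing pidx would collide with cidx, and both indices wrap with a power-of-two mask (qdma_alloc_queues() sets idx_mask to ring_size - 2, i.e. the usable index space is one less than the ring size, apparently leaving one ring entry reserved). A standalone sketch of that index arithmetic, with an 8-slot ring chosen purely for illustration:

/*
 * Standalone sketch of the producer-index handling in qdma_get_desc():
 * one slot is always left unused so that "full" and "empty" can be
 * told apart from pidx/cidx alone.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 8                    /* power of two, illustration only */
#define IDX_MASK   (RING_SLOTS - 1)

struct ring {
        unsigned int pidx;              /* producer index (driver side)  */
        unsigned int cidx;              /* consumer index (completions)  */
};

static bool ring_full(const struct ring *r)
{
        return ((r->pidx + 1) & IDX_MASK) == r->cidx;
}

static int ring_push(struct ring *r)
{
        if (ring_full(r))
                return -1;
        /* a real driver would fill the descriptor at pidx here */
        r->pidx = (r->pidx + 1) & IDX_MASK;
        return 0;
}

int main(void)
{
        struct ring r = { 0, 0 };
        int n = 0;

        while (!ring_push(&r))
                n++;
        printf("accepted %d descriptors before full\n", n); /* 7 of 8 */
        return 0;
}
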
669 if (!vdesc->sg_len) in qdma_hw_enqueue()
672 if (q->dir == DMA_MEM_TO_DEV) { in qdma_hw_enqueue()
673 dst = &vdesc->dev_addr; in qdma_hw_enqueue()
677 src = &vdesc->dev_addr; in qdma_hw_enqueue()
680 for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) { in qdma_hw_enqueue()
681 addr = sg_dma_address(sg) + vdesc->sg_off; in qdma_hw_enqueue()
682 rest = sg_dma_len(sg) - vdesc->sg_off; in qdma_hw_enqueue()
687 ret = -EBUSY; in qdma_hw_enqueue()
691 desc->src_addr = cpu_to_le64(*src); in qdma_hw_enqueue()
692 desc->dst_addr = cpu_to_le64(*dst); in qdma_hw_enqueue()
693 desc->len = cpu_to_le32(len); in qdma_hw_enqueue()
695 vdesc->dev_addr += len; in qdma_hw_enqueue()
696 vdesc->sg_off += len; in qdma_hw_enqueue()
697 vdesc->pending_descs++; in qdma_hw_enqueue()
699 rest -= len; in qdma_hw_enqueue()
701 vdesc->sg_off = 0; in qdma_hw_enqueue()
704 vdesc->sg_len -= i; in qdma_hw_enqueue()
705 vdesc->pidx = q->pidx; in qdma_hw_enqueue()
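
The qdma_hw_enqueue() loop above walks the scatterlist, carving each DMA-mapped segment into memory-mapped descriptors and remembering sg_off so it can resume mid-segment when the ring runs out of space. A rough standalone sketch of just the chunking part follows; MAX_DESC_LEN is a placeholder, not the driver's real per-descriptor limit.

/*
 * Standalone sketch: split DMA segments into descriptor-sized chunks,
 * mirroring the shape of the loop in qdma_hw_enqueue().
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_DESC_LEN (1u << 20)         /* placeholder limit: 1 MiB */

struct seg {
        unsigned long long addr;
        size_t len;
};

static unsigned int emit_descriptors(const struct seg *segs, int nsegs)
{
        unsigned int ndesc = 0;
        int i;

        for (i = 0; i < nsegs; i++) {
                unsigned long long addr = segs[i].addr;
                size_t rest = segs[i].len;

                while (rest) {
                        size_t len = rest < MAX_DESC_LEN ? rest : MAX_DESC_LEN;

                        /* a real driver would program src/dst/len here */
                        printf("desc %u: addr 0x%llx len %zu\n",
                               ndesc, addr, len);
                        addr += len;
                        rest -= len;
                        ndesc++;
                }
        }
        return ndesc;
}

int main(void)
{
        struct seg segs[] = {
                { 0x10000000ULL, 3u << 20 },    /* 3 MiB -> 3 descriptors */
                { 0x20000000ULL, 4096 },        /* 4 KiB -> 1 descriptor  */
        };

        printf("total descriptors: %u\n", emit_descriptors(segs, 2));
        return 0;
}
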
711 struct virt_dma_chan *vc = &q->vchan; in qdma_fill_pending_vdesc()
716 if (!list_empty(&vc->desc_issued)) { in qdma_fill_pending_vdesc()
717 vd = &q->issued_vdesc->vdesc; in qdma_fill_pending_vdesc()
718 list_for_each_entry_from(vd, &vc->desc_issued, node) { in qdma_fill_pending_vdesc()
722 q->issued_vdesc = vdesc; in qdma_fill_pending_vdesc()
726 q->issued_vdesc = vdesc; in qdma_fill_pending_vdesc()
729 if (list_empty(&vc->desc_submitted)) in qdma_fill_pending_vdesc()
732 if (q->submitted_vdesc) in qdma_fill_pending_vdesc()
733 vd = &q->submitted_vdesc->vdesc; in qdma_fill_pending_vdesc()
735 vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node); in qdma_fill_pending_vdesc()
737 list_for_each_entry_from(vd, &vc->desc_submitted, node) { in qdma_fill_pending_vdesc()
743 q->submitted_vdesc = vdesc; in qdma_fill_pending_vdesc()
746 static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx) in qdma_tx_submit() argument
748 struct virt_dma_chan *vc = to_virt_chan(tx->chan); in qdma_tx_submit()
749 struct qdma_queue *q = to_qdma_queue(&vc->chan); in qdma_tx_submit()
754 vd = container_of(tx, struct virt_dma_desc, tx); in qdma_tx_submit()
755 spin_lock_irqsave(&vc->lock, flags); in qdma_tx_submit()
756 cookie = dma_cookie_assign(tx); in qdma_tx_submit()
758 list_move_tail(&vd->node, &vc->desc_submitted); in qdma_tx_submit()
760 spin_unlock_irqrestore(&vc->lock, flags); in qdma_tx_submit()
771 struct dma_async_tx_descriptor *tx; in qdma_prep_device_sg() local
777 vdesc->sgl = sgl; in qdma_prep_device_sg()
778 vdesc->sg_len = sg_len; in qdma_prep_device_sg()
780 vdesc->dev_addr = q->cfg.dst_addr; in qdma_prep_device_sg()
782 vdesc->dev_addr = q->cfg.src_addr; in qdma_prep_device_sg()
784 tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags); in qdma_prep_device_sg()
785 tx->tx_submit = qdma_tx_submit; in qdma_prep_device_sg()
787 return tx; in qdma_prep_device_sg()
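
qdma_prep_device_sg() and qdma_tx_submit() above are the provider half of the standard dmaengine slave-sg flow. Below is a rough consumer-side sketch of how a client driver might drive one of these channels; the "h2c" lookup name, dev_addr and the completion callback are assumptions for illustration (the real name comes from the dma_slave_map array in struct qdma_platdata), and the scatterlist is expected to be DMA-mapped already.

/*
 * Consumer-side sketch (kernel code, not part of the driver above):
 * request a channel, configure the device address, queue a slave-sg
 * transfer and kick it. Error handling is kept minimal.
 */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static void xfer_done(void *arg)
{
        complete(arg);                  /* struct completion * from below */
}

static int start_h2c_transfer(struct device *dev, struct scatterlist *sgl,
                              unsigned int nents, dma_addr_t dev_addr,
                              struct completion *done)
{
        struct dma_slave_config cfg = {
                .dst_addr = dev_addr,   /* consumed as vdesc->dev_addr */
        };
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        int ret;

        chan = dma_request_chan(dev, "h2c");    /* name is an assumption */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                goto out;

        /* sgl must already be mapped with dma_map_sg() */
        tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                ret = -ENOMEM;
                goto out;
        }
        tx->callback = xfer_done;
        tx->callback_param = done;

        cookie = dmaengine_submit(tx);          /* lands in qdma_tx_submit() */
        if (dma_submit_error(cookie)) {
                ret = -EIO;
                goto out;
        }

        dma_async_issue_pending(chan);          /* kicks qdma_issue_pending() */
        return 0;
out:
        dma_release_channel(chan);
        return ret;
}
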
795 memcpy(&q->cfg, cfg, sizeof(*cfg)); in qdma_device_config()
804 qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid); in qdma_arm_err_intr()
805 qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx); in qdma_arm_err_intr()
844 qdev = intr->qdev; in qdma_queue_isr()
845 index = intr->cidx; in qdma_queue_isr()
852 intr_ent = le64_to_cpu(intr->base[index]); in qdma_queue_isr()
854 if (color != intr->color) in qdma_queue_isr()
859 q = qdev->c2h_queues; in qdma_queue_isr()
861 q = qdev->h2c_queues; in qdma_queue_isr()
866 spin_lock_irqsave(&q->vchan.lock, flags); in qdma_queue_isr()
867 comp_desc = (cidx - q->cidx) & q->idx_mask; in qdma_queue_isr()
869 vd = vchan_next_desc(&q->vchan); in qdma_queue_isr()
874 while (comp_desc > vdesc->pending_descs) { in qdma_queue_isr()
875 list_del(&vd->node); in qdma_queue_isr()
877 comp_desc -= vdesc->pending_descs; in qdma_queue_isr()
878 vd = vchan_next_desc(&q->vchan); in qdma_queue_isr()
881 vdesc->pending_descs -= comp_desc; in qdma_queue_isr()
882 if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) { in qdma_queue_isr()
883 list_del(&vd->node); in qdma_queue_isr()
886 q->cidx = cidx; in qdma_queue_isr()
892 spin_unlock_irqrestore(&q->vchan.lock, flags); in qdma_queue_isr()
901 intr->color = !intr->color; in qdma_queue_isr()
909 qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index); in qdma_queue_isr()
915 intr->cidx = index; in qdma_queue_isr()
917 ret = qdma_update_cidx(q, intr->ridx, index); in qdma_queue_isr()
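
qdma_queue_isr() above drains an interrupt aggregation ring using a color bit: entries carrying the expected color are fresh, the first entry with the opposite color marks the end of new work, and the expected color flips each time the ring wraps (ring->color starts at 1 in qdmam_alloc_qintr_rings() and intr->color is toggled here). A standalone sketch of that convention, with a made-up 4-entry ring and a made-up entry layout (bit 0 = color):

/*
 * Standalone sketch of color-bit ring consumption, not the driver's
 * entry format: the producer writes entries with the current color,
 * the consumer stops at the first entry whose color does not match
 * and flips its expected color whenever it wraps.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 4

struct intr_ring {
        uint64_t base[RING_ENTRIES];
        unsigned int cidx;              /* next entry to inspect      */
        bool color;                     /* color expected for "fresh" */
};

static unsigned int drain(struct intr_ring *r)
{
        unsigned int handled = 0;

        for (;;) {
                uint64_t ent = r->base[r->cidx];
                bool color = ent & 1;

                if (color != r->color)
                        break;                  /* stale entry: stop */

                /* handle the payload in bits 63:1 here */
                handled++;
                if (++r->cidx == RING_ENTRIES) {
                        r->cidx = 0;
                        r->color = !r->color;   /* expect flipped color */
                }
        }
        return handled;
}

int main(void)
{
        struct intr_ring r = { .cidx = 0, .color = true };

        /* "hardware" posts three entries with the current color (1) */
        r.base[0] = (10 << 1) | 1;
        r.base[1] = (11 << 1) | 1;
        r.base[2] = (12 << 1) | 1;

        printf("handled %u entries\n", drain(&r));      /* prints 3 */
        return 0;
}
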
929 struct device *dev = &qdev->pdev->dev; in qdma_init_error_irq()
933 vec = qdev->queue_irq_start - 1; in qdma_init_error_irq()
936 IRQF_ONESHOT, "amd-qdma-error", qdev); in qdma_init_error_irq()
952 struct device *dev = &qdev->pdev->dev; in qdmam_alloc_qintr_rings()
958 qdev->qintr_ring_num = qdev->queue_irq_num; in qdmam_alloc_qintr_rings()
959 qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num, in qdmam_alloc_qintr_rings()
960 sizeof(*qdev->qintr_rings), in qdmam_alloc_qintr_rings()
962 if (!qdev->qintr_rings) in qdmam_alloc_qintr_rings()
963 return -ENOMEM; in qdmam_alloc_qintr_rings()
965 vector = qdev->queue_irq_start; in qdmam_alloc_qintr_rings()
966 for (i = 0; i < qdev->qintr_ring_num; i++, vector++) { in qdmam_alloc_qintr_rings()
967 ring = &qdev->qintr_rings[i]; in qdmam_alloc_qintr_rings()
968 ring->qdev = qdev; in qdmam_alloc_qintr_rings()
969 ring->msix_id = qdev->err_irq_idx + i + 1; in qdmam_alloc_qintr_rings()
970 ring->ridx = i; in qdmam_alloc_qintr_rings()
971 ring->color = 1; in qdmam_alloc_qintr_rings()
972 ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE, in qdmam_alloc_qintr_rings()
973 &ring->dev_base, GFP_KERNEL); in qdmam_alloc_qintr_rings()
974 if (!ring->base) { in qdmam_alloc_qintr_rings()
976 return -ENOMEM; in qdmam_alloc_qintr_rings()
978 intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base); in qdmam_alloc_qintr_rings()
979 intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096; in qdmam_alloc_qintr_rings()
980 intr_ctxt.vec = ring->msix_id; in qdmam_alloc_qintr_rings()
984 QDMA_CTXT_CLEAR, ring->ridx, NULL); in qdmam_alloc_qintr_rings()
992 QDMA_CTXT_WRITE, ring->ridx, ctxt); in qdmam_alloc_qintr_rings()
1000 "amd-qdma-queue", ring); in qdmam_alloc_qintr_rings()
1034 dma_async_device_unregister(&qdev->dma_dev); in amd_qdma_remove()
1036 mutex_destroy(&qdev->ctxt_lock); in amd_qdma_remove()
1041 struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev); in amd_qdma_probe()
1047 qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL); in amd_qdma_probe()
1049 return -ENOMEM; in amd_qdma_probe()
1052 qdev->pdev = pdev; in amd_qdma_probe()
1053 mutex_init(&qdev->ctxt_lock); in amd_qdma_probe()
1058 ret = -ENODEV; in amd_qdma_probe()
1061 qdev->err_irq_idx = pdata->irq_index; in amd_qdma_probe()
1062 qdev->queue_irq_start = res->start + 1; in amd_qdma_probe()
1063 qdev->queue_irq_num = resource_size(res) - 1; in amd_qdma_probe()
1072 qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs, in amd_qdma_probe()
1074 if (IS_ERR(qdev->regmap)) { in amd_qdma_probe()
1075 ret = PTR_ERR(qdev->regmap); in amd_qdma_probe()
1088 INIT_LIST_HEAD(&qdev->dma_dev.channels); in amd_qdma_probe()
1100 dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask); in amd_qdma_probe()
1101 dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask); in amd_qdma_probe()
1103 qdev->dma_dev.dev = &pdev->dev; in amd_qdma_probe()
1104 qdev->dma_dev.filter.map = pdata->device_map; in amd_qdma_probe()
1105 qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2; in amd_qdma_probe()
1106 qdev->dma_dev.filter.fn = qdma_filter_fn; in amd_qdma_probe()
1107 qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources; in amd_qdma_probe()
1108 qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources; in amd_qdma_probe()
1109 qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg; in amd_qdma_probe()
1110 qdev->dma_dev.device_config = qdma_device_config; in amd_qdma_probe()
1111 qdev->dma_dev.device_issue_pending = qdma_issue_pending; in amd_qdma_probe()
1112 qdev->dma_dev.device_tx_status = dma_cookie_status; in amd_qdma_probe()
1113 qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in amd_qdma_probe()
1115 ret = dma_async_device_register(&qdev->dma_dev); in amd_qdma_probe()
1126 mutex_destroy(&qdev->ctxt_lock); in amd_qdma_probe()
1133 .name = "amd-qdma",