/* TI K3 NAVSS UDMA glue layer (k3-udma-glue.c) - fragments grouped by function */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/dma-mapping.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"
/* of_k3_udma_glue_parse() */
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
/* of_k3_udma_glue_parse_chn_common() */
	if (unlikely(!(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) && tx_chn))
		return -EINVAL;

	if (unlikely((thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) && !tx_chn))
		return -EINVAL;

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		return PTR_ERR(common->ep_config);
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;
/* of_k3_udma_glue_parse_chn() */
	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
		dev_err(common->dev, "Invalid channel atype: %u\n",
			dma_spec.args[1]);
		ret = -EINVAL;
		/* ... */
	}
	if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
		dev_err(common->dev, "Invalid channel asel: %u\n",
			dma_spec.args[1]);
		ret = -EINVAL;
		/* ... */
	}

	common->atype_asel = dma_spec.args[1];
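/*
 * For reference, a sketch of the client-side devicetree binding this parser
 * consumes. The controller phandle and PSI-L thread IDs below are
 * illustrative placeholders, not values for any particular SoC:
 *
 *	my-peripheral {
 *		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The channel is looked up by its "dma-names" entry; the first specifier
 * cell is the PSI-L thread ID and the optional second cell is the
 * atype/asel value validated above.
 */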
/* of_k3_udma_glue_parse_chn_by_id() */
	/* ... */
		return -EINVAL;
/* k3_udma_glue_dump_tx_chn() */
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev,
		"dump_tx_chn: udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);

/* k3_udma_glue_dump_tx_rt_chn() */
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
/* k3_udma_glue_cfg_tx_chn() */
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
/* k3_udma_glue_request_tx_chn_common() */
	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
							  tx_chn->common.psdata_size,
							  tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		/* ... */
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		/* ... */
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		/* ... */
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;
/* k3_udma_glue_request_tx_chn() */
	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
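/*
 * Example (not part of the driver): a minimal sketch of requesting a TX
 * channel from client code, modeled on how TI networking drivers use this
 * API. The ring sizes and the "tx0" channel name are assumptions.
 */
static struct k3_udma_glue_tx_channel *
example_request_tx(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { };

	tx_cfg.swdata_size = 16;		/* per-descriptor scratch area */
	tx_cfg.tx_cfg.size = 128;		/* TX ring elements */
	tx_cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	tx_cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	tx_cfg.txcq_cfg = tx_cfg.tx_cfg;	/* completion ring mirrors TX ring */

	/* "tx0" must match an entry in the client's "dma-names" property */
	return k3_udma_glue_request_tx_chn(dev, "tx0", &tx_cfg);
}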
/* k3_udma_glue_request_tx_chn_for_thread_id() */
	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
/* k3_udma_glue_release_tx_chn() */
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
/* k3_udma_glue_push_tx_chn() */
	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
/* k3_udma_glue_pop_tx_chn() */
	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
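/*
 * Example (not part of the driver): draining the TX completion queue after a
 * completion interrupt. example_free_tx_desc() stands in for whatever lookup
 * and dma_unmap/free logic the client keeps for its descriptor pool.
 */
static void example_free_tx_desc(dma_addr_t desc_dma);	/* hypothetical helper */

static void example_tx_complete(struct k3_udma_glue_tx_channel *tx_chn)
{
	dma_addr_t desc_dma;

	/* each successful pop returns one free_pkts credit (see above) */
	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
		/* strip PKTDMA ASEL bits so this is a plain DMA address again */
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &desc_dma);
		example_free_tx_desc(desc_dma);
	}
}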
/* k3_udma_glue_enable_tx_chn() */
	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);
/* k3_udma_glue_disable_tx_chn() */
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
/* k3_udma_glue_tdown_tx_chn() */
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		/* ... */
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
		/* ... */
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
/* k3_udma_glue_reset_tx_chn() */
	struct device *dev = tx_chn->common.dev;

	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not input for udma - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
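/*
 * Example (not part of the driver): the shutdown sequence a client would
 * typically run, combining the teardown, reset, and disable calls above.
 * example_cleanup() is a hypothetical callback that frees the descriptor
 * behind each popped DMA address.
 */
static void example_cleanup(void *data, dma_addr_t desc_dma)
{
	/* unmap and free the buffer/descriptor that lives at desc_dma */
}

static void example_stop_tx(struct k3_udma_glue_tx_channel *tx_chn, void *priv)
{
	k3_udma_glue_tdown_tx_chn(tx_chn, true);	/* synchronous teardown */
	k3_udma_glue_reset_tx_chn(tx_chn, priv, example_cleanup);
	k3_udma_glue_disable_tx_chn(tx_chn);
}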
/* k3_udma_glue_tx_get_hdesc_size() */
	return tx_chn->common.hdesc_size;

/* k3_udma_glue_tx_get_txcq_id() */
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
/* k3_udma_glue_tx_get_irq() */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	if (!tx_chn->virq)
		return -ENXIO;

	return tx_chn->virq;
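/*
 * Example (not part of the driver): hooking the TX completion interrupt
 * returned above. Assumes <linux/interrupt.h>; the handler body, trigger
 * flag, and "example-tx" name are illustrative.
 */
static irqreturn_t example_tx_irq(int irq, void *data)
{
	/* schedule NAPI or a completion worker here */
	return IRQ_HANDLED;
}

static int example_init_tx_irq(struct device *dev,
			       struct k3_udma_glue_tx_channel *tx_chn,
			       void *priv)
{
	int irq = k3_udma_glue_tx_get_irq(tx_chn);

	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, example_tx_irq, IRQF_TRIGGER_HIGH,
				"example-tx", priv);
}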
/* k3_udma_glue_tx_get_dma_device() */
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
/* k3_udma_glue_tx_dma_to_cppi5_addr() */
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
/* k3_udma_glue_tx_cppi5_to_dma_addr() */
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
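/*
 * Example (not part of the driver): mapping descriptor memory with the
 * device the glue layer designates, then folding in the ASEL bits before
 * the address is written to a CPPI5 descriptor field. desc/desc_size are
 * whatever descriptor pool the client manages.
 */
static dma_addr_t example_map_tx_desc(struct k3_udma_glue_tx_channel *tx_chn,
				      void *desc, size_t desc_size)
{
	struct device *dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	dma_addr_t desc_dma;

	desc_dma = dma_map_single(dma_dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, desc_dma))
		return DMA_MAPPING_ERROR;

	/* no-op unless this is PKTDMA with a non-zero ASEL (see above) */
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &desc_dma);

	return desc_dma;
}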
/* k3_udma_glue_cfg_rx_chn() */
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);
/* k3_udma_glue_release_rx_flow() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
/* k3_udma_glue_cfg_rx_flow() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		/* ... */
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		/* ... */
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);

	if (rx_chn->remote) {
		/* ... */
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		/* ... */
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
/* k3_udma_glue_dump_rx_chn() */
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn: udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
/* k3_udma_glue_dump_rx_rt_chn() */
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
/* k3_udma_glue_allocate_rx_flows() */
	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not a GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}

	rx_chn->flow_id_base = ret;
/* k3_udma_glue_request_rx_chn_priv() */
	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		/* ... */
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		/* ... */
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		/* ... */
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			/* ... */
			ret = -EINVAL;
			/* ... */
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		/* ... */
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		/* ... */
	}
/* k3_udma_glue_request_remote_rx_chn_common() */
	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
		     rx_chn->common.src_thread, rx_chn->flow_id_base);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		/* ... */
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
/* k3_udma_glue_request_remote_rx_chn() */
	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
/* k3_udma_glue_request_remote_rx_chn_for_thread_id() */
	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
/* k3_udma_glue_request_rx_chn() */
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);

	return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
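/*
 * Example (not part of the driver): requesting a local RX channel with one
 * default flow, modeled on how TI networking drivers use this API. Ring
 * sizes and the "rx" channel name are assumptions.
 */
static struct k3_udma_glue_rx_channel *
example_request_rx(struct device *dev)
{
	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { };

	flow_cfg.rx_cfg.size = 128;		/* RX ring elements */
	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	flow_cfg.rxfdq_cfg = flow_cfg.rx_cfg;	/* free-descriptor queue ring */
	flow_cfg.ring_rxq_id = -1;		/* let ringacc pick the rings */
	flow_cfg.ring_rxfdq0_id = -1;
	flow_cfg.rx_error_handling = false;	/* drop packets on starvation */

	rx_cfg.swdata_size = 16;
	rx_cfg.flow_id_base = -1;		/* allocate GP flows dynamically */
	rx_cfg.flow_id_num = 1;			/* must be 1 with def_flow_cfg */
	rx_cfg.def_flow_cfg = &flow_cfg;

	return k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
}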
/* k3_udma_glue_release_rx_chn() */
	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
/* k3_udma_glue_rx_flow_init() */
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);

/* k3_udma_glue_rx_flow_get_fdq_id() */
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);

/* k3_udma_glue_rx_get_flow_id_base() */
	return rx_chn->flow_id_base;
/* k3_udma_glue_rx_flow_enable() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
/* k3_udma_glue_rx_flow_disable() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;

	if (!rx_chn->remote)
		return -EINVAL;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
/* k3_udma_glue_enable_rx_chn() */
	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);
/* k3_udma_glue_disable_rx_chn() */
	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
/* k3_udma_glue_tdown_rx_chn() */
	if (rx_chn->remote)
		return;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		/* ... */
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
		/* ... */
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");

/* k3_udma_glue_reset_rx_chn() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);

	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

	k3_ringacc_ring_reset(flow->ringrx);
/* k3_udma_glue_push_rx_chn() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
/* k3_udma_glue_pop_rx_chn() */
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
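/*
 * Example (not part of the driver): the RX descriptor life cycle for flow 0.
 * example_refill() returns a free descriptor to the flow's FDQ and
 * example_rx_poll() consumes completed packets; both helpers are
 * illustrative, not part of the glue API.
 */
static int example_refill(struct k3_udma_glue_rx_channel *rx_chn,
			  struct cppi5_host_desc_t *desc_rx,
			  dma_addr_t desc_dma)
{
	/* hand a free buffer descriptor to flow 0's free-descriptor queue */
	return k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma);
}

static void example_rx_poll(struct k3_udma_glue_rx_channel *rx_chn)
{
	dma_addr_t desc_dma;

	/* pop completed packets from flow 0's RX ring until it is empty */
	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma)) {
		/* strip ASEL bits (see the helpers below) before lookup */
		k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn, &desc_dma);
		/* look up the descriptor at desc_dma and process the packet */
	}
}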
/* k3_udma_glue_rx_get_irq() */
	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	if (!flow->virq)
		return -ENXIO;

	return flow->virq;
/* k3_udma_glue_rx_get_dma_device() */
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);

/* k3_udma_glue_rx_dma_to_cppi5_addr() */
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;

/* k3_udma_glue_rx_cppi5_to_dma_addr() */
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);