1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * K3 NAVSS DMA glue interface
4   *
5   * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
6   *
7   */
8  
9  #include <linux/module.h>
10  #include <linux/atomic.h>
11  #include <linux/delay.h>
12  #include <linux/dma-mapping.h>
13  #include <linux/io.h>
14  #include <linux/init.h>
15  #include <linux/of.h>
16  #include <linux/platform_device.h>
17  #include <linux/soc/ti/k3-ringacc.h>
18  #include <linux/dma/ti-cppi5.h>
19  #include <linux/dma/k3-udma-glue.h>
20  
21  #include "k3-udma.h"
22  #include "k3-psil-priv.h"
23  
24  struct k3_udma_glue_common {
25  	struct device *dev;
26  	struct device chan_dev;
27  	struct udma_dev *udmax;
28  	const struct udma_tisci_rm *tisci_rm;
29  	struct k3_ringacc *ringacc;
30  	u32 src_thread;
31  	u32 dst_thread;
32  
33  	u32  hdesc_size;
34  	bool epib;
35  	u32  psdata_size;
36  	u32  swdata_size;
37  	u32  atype_asel;
38  	struct psil_endpoint_config *ep_config;
39  };
40  
41  struct k3_udma_glue_tx_channel {
42  	struct k3_udma_glue_common common;
43  
44  	struct udma_tchan *udma_tchanx;
45  	int udma_tchan_id;
46  
47  	struct k3_ring *ringtx;
48  	struct k3_ring *ringtxcq;
49  
50  	bool psil_paired;
51  
52  	int virq;
53  
54  	atomic_t free_pkts;
55  	bool tx_pause_on_err;
56  	bool tx_filt_einfo;
57  	bool tx_filt_pswords;
58  	bool tx_supr_tdpkt;
59  
60  	int udma_tflow_id;
61  };
62  
63  struct k3_udma_glue_rx_flow {
64  	struct udma_rflow *udma_rflow;
65  	int udma_rflow_id;
66  	struct k3_ring *ringrx;
67  	struct k3_ring *ringrxfdq;
68  
69  	int virq;
70  };
71  
72  struct k3_udma_glue_rx_channel {
73  	struct k3_udma_glue_common common;
74  
75  	struct udma_rchan *udma_rchanx;
76  	int udma_rchan_id;
77  	bool remote;
78  
79  	bool psil_paired;
80  
81  	u32  swdata_size;
82  	int  flow_id_base;
83  
84  	struct k3_udma_glue_rx_flow *flows;
85  	u32 flow_num;
86  	u32 flows_ready;
87  };
88  
89  static void k3_udma_chan_dev_release(struct device *dev)
90  {
91  	/* The struct containing the device is devm managed */
92  }
93  
94  static struct class k3_udma_glue_devclass = {
95  	.name		= "k3_udma_glue_chan",
96  	.dev_release	= k3_udma_chan_dev_release,
97  };
98  
99  #define K3_UDMAX_TDOWN_TIMEOUT_US 1000
100  
101  static int of_k3_udma_glue_parse(struct device_node *udmax_np,
102  				 struct k3_udma_glue_common *common)
103  {
104  	common->udmax = of_xudma_dev_get(udmax_np, NULL);
105  	if (IS_ERR(common->udmax))
106  		return PTR_ERR(common->udmax);
107  
108  	common->ringacc = xudma_get_ringacc(common->udmax);
109  	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
110  
111  	return 0;
112  }
113  
114  static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
115  					    bool tx_chn)
116  {
117  	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
118  		return -EINVAL;
119  
120  	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
121  		return -EINVAL;
122  
123  	/* get psil endpoint config */
124  	common->ep_config = psil_get_ep_config(thread_id);
125  	if (IS_ERR(common->ep_config)) {
126  		dev_err(common->dev,
127  			"No configuration for psi-l thread 0x%04x\n",
128  			thread_id);
129  		return PTR_ERR(common->ep_config);
130  	}
131  
132  	common->epib = common->ep_config->needs_epib;
133  	common->psdata_size = common->ep_config->psd_size;
134  
135  	if (tx_chn)
136  		common->dst_thread = thread_id;
137  	else
138  		common->src_thread = thread_id;
139  
140  	return 0;
141  }
142  
143  static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
144  		const char *name, struct k3_udma_glue_common *common,
145  		bool tx_chn)
146  {
147  	struct of_phandle_args dma_spec;
148  	u32 thread_id;
149  	int ret = 0;
150  	int index;
151  
152  	if (unlikely(!name))
153  		return -EINVAL;
154  
155  	index = of_property_match_string(chn_np, "dma-names", name);
156  	if (index < 0)
157  		return index;
158  
159  	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
160  				       &dma_spec))
161  		return -ENOENT;
162  
163  	ret = of_k3_udma_glue_parse(dma_spec.np, common);
164  	if (ret)
165  		goto out_put_spec;
166  
167  	thread_id = dma_spec.args[0];
168  	if (dma_spec.args_count == 2) {
169  		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
170  			dev_err(common->dev, "Invalid channel atype: %u\n",
171  				dma_spec.args[1]);
172  			ret = -EINVAL;
173  			goto out_put_spec;
174  		}
175  		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
176  			dev_err(common->dev, "Invalid channel asel: %u\n",
177  				dma_spec.args[1]);
178  			ret = -EINVAL;
179  			goto out_put_spec;
180  		}
181  
182  		common->atype_asel = dma_spec.args[1];
183  	}
184  
185  	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
186  
187  out_put_spec:
188  	of_node_put(dma_spec.np);
189  	return ret;
190  }
191  
192  static int
193  of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
194  				bool tx_chn, u32 thread_id)
195  {
196  	int ret = 0;
197  
198  	if (unlikely(!udmax_np))
199  		return -EINVAL;
200  
201  	ret = of_k3_udma_glue_parse(udmax_np, common);
202  	if (ret)
203  		return ret;
204  
205  	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
206  	return ret;
207  }
208  
209  static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
210  {
211  	struct device *dev = tx_chn->common.dev;
212  
213  	dev_dbg(dev, "dump_tx_chn:\n"
214  		"udma_tchan_id: %d\n"
215  		"src_thread: %08x\n"
216  		"dst_thread: %08x\n",
217  		tx_chn->udma_tchan_id,
218  		tx_chn->common.src_thread,
219  		tx_chn->common.dst_thread);
220  }
221  
222  static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
223  					char *mark)
224  {
225  	struct device *dev = chn->common.dev;
226  
227  	dev_dbg(dev, "=== dump ===> %s\n", mark);
228  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
229  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
230  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
231  		xudma_tchanrt_read(chn->udma_tchanx,
232  				   UDMA_CHAN_RT_PEER_RT_EN_REG));
233  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
234  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
235  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
236  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
237  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
238  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
239  }
240  
241  static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
242  {
243  	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
244  	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
245  
246  	memset(&req, 0, sizeof(req));
247  
248  	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
249  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
250  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
251  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
252  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
253  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
254  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
255  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
256  	req.nav_id = tisci_rm->tisci_dev_id;
257  	req.index = tx_chn->udma_tchan_id;
258  	if (tx_chn->tx_pause_on_err)
259  		req.tx_pause_on_err = 1;
260  	if (tx_chn->tx_filt_einfo)
261  		req.tx_filt_einfo = 1;
262  	if (tx_chn->tx_filt_pswords)
263  		req.tx_filt_pswords = 1;
264  	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
265  	if (tx_chn->tx_supr_tdpkt)
266  		req.tx_supr_tdpkt = 1;
267  	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
268  	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
269  	req.tx_atype = tx_chn->common.atype_asel;
270  
271  	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
272  }
273  
274  static int
275  k3_udma_glue_request_tx_chn_common(struct device *dev,
276  				   struct k3_udma_glue_tx_channel *tx_chn,
277  				   struct k3_udma_glue_tx_channel_cfg *cfg)
278  {
279  	int ret;
280  
281  	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
282  						tx_chn->common.psdata_size,
283  						tx_chn->common.swdata_size);
284  
285  	if (xudma_is_pktdma(tx_chn->common.udmax))
286  		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
287  	else
288  		tx_chn->udma_tchan_id = -1;
289  
290  	/* request and cfg UDMAP TX channel */
291  	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
292  					      tx_chn->udma_tchan_id);
293  	if (IS_ERR(tx_chn->udma_tchanx)) {
294  		ret = PTR_ERR(tx_chn->udma_tchanx);
295  		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
296  		return ret;
297  	}
298  	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
299  
300  	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
301  	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
302  	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
303  		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
304  	ret = device_register(&tx_chn->common.chan_dev);
305  	if (ret) {
306  		dev_err(dev, "Channel Device registration failed %d\n", ret);
307  		put_device(&tx_chn->common.chan_dev);
308  		tx_chn->common.chan_dev.parent = NULL;
309  		return ret;
310  	}
311  
312  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
313  		/* prepare the channel device as coherent */
314  		tx_chn->common.chan_dev.dma_coherent = true;
315  		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
316  					     DMA_BIT_MASK(48));
317  	}
318  
319  	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
320  
321  	if (xudma_is_pktdma(tx_chn->common.udmax))
322  		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
323  	else
324  		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
325  
326  	/* request and cfg rings */
327  	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
328  					     tx_chn->udma_tflow_id, -1,
329  					     &tx_chn->ringtx,
330  					     &tx_chn->ringtxcq);
331  	if (ret) {
332  		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
333  		return ret;
334  	}
335  
336  	/* Set the dma_dev for the rings to be configured */
337  	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
338  	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
339  
340  	/* Set the ASEL value for DMA rings of PKTDMA */
341  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
342  		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
343  		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
344  	}
345  
346  	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
347  	if (ret) {
348  		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
349  		return ret;
350  	}
351  
352  	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
353  	if (ret) {
354  		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
355  		return ret;
356  	}
357  
358  	/* request and cfg psi-l */
359  	tx_chn->common.src_thread =
360  			xudma_dev_get_psil_base(tx_chn->common.udmax) +
361  			tx_chn->udma_tchan_id;
362  
363  	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
364  	if (ret) {
365  		dev_err(dev, "Failed to cfg tchan %d\n", ret);
366  		return ret;
367  	}
368  
369  	k3_udma_glue_dump_tx_chn(tx_chn);
370  
371  	return 0;
372  }
373  
374  struct k3_udma_glue_tx_channel *
375  k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
376  			    struct k3_udma_glue_tx_channel_cfg *cfg)
377  {
378  	struct k3_udma_glue_tx_channel *tx_chn;
379  	int ret;
380  
381  	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
382  	if (!tx_chn)
383  		return ERR_PTR(-ENOMEM);
384  
385  	tx_chn->common.dev = dev;
386  	tx_chn->common.swdata_size = cfg->swdata_size;
387  	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
388  	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
389  	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
390  	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
391  
392  	/* parse of udmap channel */
393  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
394  					&tx_chn->common, true);
395  	if (ret)
396  		goto err;
397  
398  	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
399  	if (ret)
400  		goto err;
401  
402  	return tx_chn;
403  
404  err:
405  	k3_udma_glue_release_tx_chn(tx_chn);
406  	return ERR_PTR(ret);
407  }
408  EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
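
/*
 * Illustrative usage sketch (not part of this driver; the "tx0" dma-name,
 * ring sizes and all client-side variable names are hypothetical). A DMA
 * client such as a networking driver would typically request, query and
 * enable a TX channel along these lines:
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.tx_cfg.size = 512;
 *	cfg.txcq_cfg.size = 512;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *
 *	hdesc_size = k3_udma_glue_tx_get_hdesc_size(tx_chn);
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	ret = k3_udma_glue_enable_tx_chn(tx_chn);
 */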
409  
410  struct k3_udma_glue_tx_channel *
411  k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
412  					  struct k3_udma_glue_tx_channel_cfg *cfg,
413  					  struct device_node *udmax_np, u32 thread_id)
414  {
415  	struct k3_udma_glue_tx_channel *tx_chn;
416  	int ret;
417  
418  	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
419  	if (!tx_chn)
420  		return ERR_PTR(-ENOMEM);
421  
422  	tx_chn->common.dev = dev;
423  	tx_chn->common.swdata_size = cfg->swdata_size;
424  	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
425  	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
426  	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
427  	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
428  
429  	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
430  	if (ret)
431  		goto err;
432  
433  	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
434  	if (ret)
435  		goto err;
436  
437  	return tx_chn;
438  
439  err:
440  	k3_udma_glue_release_tx_chn(tx_chn);
441  	return ERR_PTR(ret);
442  }
443  EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
444  
445  void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
446  {
447  	if (tx_chn->psil_paired) {
448  		xudma_navss_psil_unpair(tx_chn->common.udmax,
449  					tx_chn->common.src_thread,
450  					tx_chn->common.dst_thread);
451  		tx_chn->psil_paired = false;
452  	}
453  
454  	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
455  		xudma_tchan_put(tx_chn->common.udmax,
456  				tx_chn->udma_tchanx);
457  
458  	if (tx_chn->ringtxcq)
459  		k3_ringacc_ring_free(tx_chn->ringtxcq);
460  
461  	if (tx_chn->ringtx)
462  		k3_ringacc_ring_free(tx_chn->ringtx);
463  
464  	if (tx_chn->common.chan_dev.parent) {
465  		device_unregister(&tx_chn->common.chan_dev);
466  		tx_chn->common.chan_dev.parent = NULL;
467  	}
468  }
469  EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
470  
471  int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
472  			     struct cppi5_host_desc_t *desc_tx,
473  			     dma_addr_t desc_dma)
474  {
475  	u32 ringtxcq_id;
476  
477  	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
478  		return -ENOMEM;
479  
480  	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
481  	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
482  
483  	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
484  }
485  EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
486  
487  int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
488  			    dma_addr_t *desc_dma)
489  {
490  	int ret;
491  
492  	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
493  	if (!ret)
494  		atomic_inc(&tx_chn->free_pkts);
495  
496  	return ret;
497  }
498  EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
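
/*
 * Illustrative TX descriptor life cycle (sketch; desc_tx/desc_dma and the
 * completion-handler structure are client-side and hypothetical): the
 * producer pushes a prepared CPPI5 host descriptor to the TX submit ring,
 * and the completion handler pops it back from the TX completion ring:
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	// in the TXCQ interrupt/NAPI handler:
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
 *		// map desc_dma back to the CPU descriptor, unmap and recycle it
 *	}
 */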
499  
500  int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
501  {
502  	int ret;
503  
504  	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
505  				    tx_chn->common.src_thread,
506  				    tx_chn->common.dst_thread);
507  	if (ret) {
508  		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
509  		return ret;
510  	}
511  
512  	tx_chn->psil_paired = true;
513  
514  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
515  			    UDMA_PEER_RT_EN_ENABLE);
516  
517  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
518  			    UDMA_CHAN_RT_CTL_EN);
519  
520  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
521  	return 0;
522  }
523  EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
524  
525  void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
526  {
527  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
528  
529  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);
530  
531  	xudma_tchanrt_write(tx_chn->udma_tchanx,
532  			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
533  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
534  
535  	if (tx_chn->psil_paired) {
536  		xudma_navss_psil_unpair(tx_chn->common.udmax,
537  					tx_chn->common.src_thread,
538  					tx_chn->common.dst_thread);
539  		tx_chn->psil_paired = false;
540  	}
541  }
542  EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
543  
544  void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
545  			       bool sync)
546  {
547  	int i = 0;
548  	u32 val;
549  
550  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
551  
552  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
553  			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
554  
555  	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);
556  
557  	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
558  		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
559  					 UDMA_CHAN_RT_CTL_REG);
560  		udelay(1);
561  		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
562  			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
563  			break;
564  		}
565  		i++;
566  	}
567  
568  	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
569  				 UDMA_CHAN_RT_PEER_RT_EN_REG);
570  	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
571  		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
572  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
573  }
574  EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
575  
576  void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
577  			       void *data,
578  			       void (*cleanup)(void *data, dma_addr_t desc_dma))
579  {
580  	struct device *dev = tx_chn->common.dev;
581  	dma_addr_t desc_dma;
582  	int occ_tx, i, ret;
583  
584  	/*
585  	 * TXQ reset needs to be done in a special way as it is input for udma
586  	 * and its state is cached by udma, so:
587  	 * 1) save TXQ occ
588  	 * 2) clean up TXQ and call callback .cleanup() for each desc
589  	 * 3) reset TXQ in a special way
590  	 */
591  	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
592  	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
593  
594  	for (i = 0; i < occ_tx; i++) {
595  		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
596  		if (ret) {
597  			if (ret != -ENODATA)
598  				dev_err(dev, "TX reset pop %d\n", ret);
599  			break;
600  		}
601  		cleanup(data, desc_dma);
602  	}
603  
604  	/* reset TXCQ as it is not input for udma - expected to be empty */
605  	k3_ringacc_ring_reset(tx_chn->ringtxcq);
606  	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
607  }
608  EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
609  
610  u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
611  {
612  	return tx_chn->common.hdesc_size;
613  }
614  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
615  
616  u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
617  {
618  	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
619  }
620  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
621  
622  int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
623  {
624  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
625  		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
626  							  tx_chn->udma_tflow_id);
627  	} else {
628  		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
629  	}
630  
631  	if (!tx_chn->virq)
632  		return -ENXIO;
633  
634  	return tx_chn->virq;
635  }
636  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
637  
638  struct device *
639  	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
640  {
641  	if (xudma_is_pktdma(tx_chn->common.udmax) &&
642  	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
643  		return &tx_chn->common.chan_dev;
644  
645  	return xudma_get_device(tx_chn->common.udmax);
646  }
647  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
648  
649  void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
650  				       dma_addr_t *addr)
651  {
652  	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
653  	    !tx_chn->common.atype_asel)
654  		return;
655  
656  	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
657  }
658  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
659  
660  void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
661  				       dma_addr_t *addr)
662  {
663  	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
664  	    !tx_chn->common.atype_asel)
665  		return;
666  
667  	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
668  }
669  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
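
/*
 * Note on the two helpers above (summary of the code, with a hypothetical
 * client-side sketch): on PKTDMA with a non-zero ASEL, addresses written
 * into CPPI5 descriptors and rings carry the ASEL value in the bits at and
 * above K3_ADDRESS_ASEL_SHIFT, while the lower bits hold the bus address.
 * Clients convert buffer/descriptor addresses in both directions, e.g.:
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
 *	cppi5_hdesc_attach_buf(desc_tx, buf_dma, len, buf_dma, len);
 */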
670  
671  static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
672  {
673  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
674  	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
675  	int ret;
676  
677  	memset(&req, 0, sizeof(req));
678  
679  	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
680  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
681  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
682  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
683  
684  	req.nav_id = tisci_rm->tisci_dev_id;
685  	req.index = rx_chn->udma_rchan_id;
686  	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
687  	/*
688  	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
689  	 * and udmax impl, so just configure it to an invalid value.
690  	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
691  	 */
692  	req.rxcq_qnum = 0xFFFF;
693  	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
694  	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
695  		/* Default flow + extra ones */
696  		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
697  				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
698  		req.flowid_start = rx_chn->flow_id_base;
699  		req.flowid_cnt = rx_chn->flow_num;
700  	}
701  	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
702  	req.rx_atype = rx_chn->common.atype_asel;
703  
704  	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
705  	if (ret)
706  		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
707  			rx_chn->udma_rchan_id, ret);
708  
709  	return ret;
710  }
711  
712  static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
713  					 u32 flow_num)
714  {
715  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
716  
717  	if (IS_ERR_OR_NULL(flow->udma_rflow))
718  		return;
719  
720  	if (flow->ringrxfdq)
721  		k3_ringacc_ring_free(flow->ringrxfdq);
722  
723  	if (flow->ringrx)
724  		k3_ringacc_ring_free(flow->ringrx);
725  
726  	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
727  	flow->udma_rflow = NULL;
728  	rx_chn->flows_ready--;
729  }
730  
731  static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
732  				    u32 flow_idx,
733  				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
734  {
735  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
736  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
737  	struct device *dev = rx_chn->common.dev;
738  	struct ti_sci_msg_rm_udmap_flow_cfg req;
739  	int rx_ring_id;
740  	int rx_ringfdq_id;
741  	int ret = 0;
742  
743  	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
744  					   flow->udma_rflow_id);
745  	if (IS_ERR(flow->udma_rflow)) {
746  		ret = PTR_ERR(flow->udma_rflow);
747  		dev_err(dev, "UDMAX rflow get err %d\n", ret);
748  		return ret;
749  	}
750  
751  	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
752  		ret = -ENODEV;
753  		goto err_rflow_put;
754  	}
755  
756  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
757  		rx_ringfdq_id = flow->udma_rflow_id +
758  				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
759  		rx_ring_id = 0;
760  	} else {
761  		rx_ring_id = flow_cfg->ring_rxq_id;
762  		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
763  	}
764  
765  	/* request and cfg rings */
766  	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
767  					     rx_ringfdq_id, rx_ring_id,
768  					     &flow->ringrxfdq,
769  					     &flow->ringrx);
770  	if (ret) {
771  		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
772  		goto err_rflow_put;
773  	}
774  
775  	/* Set the dma_dev for the rings to be configured */
776  	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
777  	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
778  
779  	/* Set the ASEL value for DMA rings of PKTDMA */
780  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
781  		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
782  		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
783  	}
784  
785  	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
786  	if (ret) {
787  		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
788  		goto err_ringrxfdq_free;
789  	}
790  
791  	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
792  	if (ret) {
793  		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
794  		goto err_ringrxfdq_free;
795  	}
796  
797  	if (rx_chn->remote) {
798  		rx_ring_id = TI_SCI_RESOURCE_NULL;
799  		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
800  	} else {
801  		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
802  		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
803  	}
804  
805  	memset(&req, 0, sizeof(req));
806  
807  	req.valid_params =
808  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
809  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
810  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
811  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
812  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
813  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
814  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
815  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
816  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
817  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
818  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
819  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
820  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
821  	req.nav_id = tisci_rm->tisci_dev_id;
822  	req.flow_index = flow->udma_rflow_id;
823  	if (rx_chn->common.epib)
824  		req.rx_einfo_present = 1;
825  	if (rx_chn->common.psdata_size)
826  		req.rx_psinfo_present = 1;
827  	if (flow_cfg->rx_error_handling)
828  		req.rx_error_handling = 1;
829  	req.rx_desc_type = 0;
830  	req.rx_dest_qnum = rx_ring_id;
831  	req.rx_src_tag_hi_sel = 0;
832  	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
833  	req.rx_dest_tag_hi_sel = 0;
834  	req.rx_dest_tag_lo_sel = 0;
835  	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
836  	req.rx_fdq1_qnum = rx_ringfdq_id;
837  	req.rx_fdq2_qnum = rx_ringfdq_id;
838  	req.rx_fdq3_qnum = rx_ringfdq_id;
839  
840  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
841  	if (ret) {
842  		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
843  			ret);
844  		goto err_ringrxfdq_free;
845  	}
846  
847  	rx_chn->flows_ready++;
848  	dev_dbg(dev, "flow%d config done. ready:%d\n",
849  		flow->udma_rflow_id, rx_chn->flows_ready);
850  
851  	return 0;
852  
853  err_ringrxfdq_free:
854  	k3_ringacc_ring_free(flow->ringrxfdq);
855  	k3_ringacc_ring_free(flow->ringrx);
856  
857  err_rflow_put:
858  	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
859  	flow->udma_rflow = NULL;
860  
861  	return ret;
862  }
863  
864  static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
865  {
866  	struct device *dev = chn->common.dev;
867  
868  	dev_dbg(dev, "dump_rx_chn:\n"
869  		"udma_rchan_id: %d\n"
870  		"src_thread: %08x\n"
871  		"dst_thread: %08x\n"
872  		"epib: %d\n"
873  		"hdesc_size: %u\n"
874  		"psdata_size: %u\n"
875  		"swdata_size: %u\n"
876  		"flow_id_base: %d\n"
877  		"flow_num: %d\n",
878  		chn->udma_rchan_id,
879  		chn->common.src_thread,
880  		chn->common.dst_thread,
881  		chn->common.epib,
882  		chn->common.hdesc_size,
883  		chn->common.psdata_size,
884  		chn->common.swdata_size,
885  		chn->flow_id_base,
886  		chn->flow_num);
887  }
888  
889  static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
890  					char *mark)
891  {
892  	struct device *dev = chn->common.dev;
893  
894  	dev_dbg(dev, "=== dump ===> %s\n", mark);
895  
896  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
897  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
898  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
899  		xudma_rchanrt_read(chn->udma_rchanx,
900  				   UDMA_CHAN_RT_PEER_RT_EN_REG));
901  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
902  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
903  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
904  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
905  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
906  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
907  }
908  
909  static int
910  k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
911  			       struct k3_udma_glue_rx_channel_cfg *cfg)
912  {
913  	int ret;
914  
915  	/* default rflow */
916  	if (cfg->flow_id_use_rxchan_id)
917  		return 0;
918  
919  	/* not GP rflows */
920  	if (rx_chn->flow_id_base != -1 &&
921  	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
922  		return 0;
923  
924  	/* Allocate range of GP rflows */
925  	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
926  					 rx_chn->flow_id_base,
927  					 rx_chn->flow_num);
928  	if (ret < 0) {
929  		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
930  			rx_chn->flow_id_base, rx_chn->flow_num, ret);
931  		return ret;
932  	}
933  	rx_chn->flow_id_base = ret;
934  
935  	return 0;
936  }
937  
938  static struct k3_udma_glue_rx_channel *
939  k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
940  				 struct k3_udma_glue_rx_channel_cfg *cfg)
941  {
942  	struct k3_udma_glue_rx_channel *rx_chn;
943  	struct psil_endpoint_config *ep_cfg;
944  	int ret, i;
945  
946  	if (cfg->flow_id_num <= 0)
947  		return ERR_PTR(-EINVAL);
948  
949  	if (cfg->flow_id_num != 1 &&
950  	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
951  		return ERR_PTR(-EINVAL);
952  
953  	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
954  	if (!rx_chn)
955  		return ERR_PTR(-ENOMEM);
956  
957  	rx_chn->common.dev = dev;
958  	rx_chn->common.swdata_size = cfg->swdata_size;
959  	rx_chn->remote = false;
960  
961  	/* parse of udmap channel */
962  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
963  					&rx_chn->common, false);
964  	if (ret)
965  		goto err;
966  
967  	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
968  						rx_chn->common.psdata_size,
969  						rx_chn->common.swdata_size);
970  
971  	ep_cfg = rx_chn->common.ep_config;
972  
973  	if (xudma_is_pktdma(rx_chn->common.udmax))
974  		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
975  	else
976  		rx_chn->udma_rchan_id = -1;
977  
978  	/* request and cfg UDMAP RX channel */
979  	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
980  					      rx_chn->udma_rchan_id);
981  	if (IS_ERR(rx_chn->udma_rchanx)) {
982  		ret = PTR_ERR(rx_chn->udma_rchanx);
983  		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
984  		goto err;
985  	}
986  	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
987  
988  	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
989  	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
990  	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
991  		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
992  	ret = device_register(&rx_chn->common.chan_dev);
993  	if (ret) {
994  		dev_err(dev, "Channel Device registration failed %d\n", ret);
995  		put_device(&rx_chn->common.chan_dev);
996  		rx_chn->common.chan_dev.parent = NULL;
997  		goto err;
998  	}
999  
1000  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1001  		/* prepare the channel device as coherent */
1002  		rx_chn->common.chan_dev.dma_coherent = true;
1003  		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
1004  					     DMA_BIT_MASK(48));
1005  	}
1006  
1007  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1008  		int flow_start = cfg->flow_id_base;
1009  		int flow_end;
1010  
1011  		if (flow_start == -1)
1012  			flow_start = ep_cfg->flow_start;
1013  
1014  		flow_end = flow_start + cfg->flow_id_num - 1;
1015  		if (flow_start < ep_cfg->flow_start ||
1016  		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
1017  			dev_err(dev, "Invalid flow range requested\n");
1018  			ret = -EINVAL;
1019  			goto err;
1020  		}
1021  		rx_chn->flow_id_base = flow_start;
1022  	} else {
1023  		rx_chn->flow_id_base = cfg->flow_id_base;
1024  
1025  		/* Use RX channel id as flow id: target dev can't generate flow_id */
1026  		if (cfg->flow_id_use_rxchan_id)
1027  			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
1028  	}
1029  
1030  	rx_chn->flow_num = cfg->flow_id_num;
1031  
1032  	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
1033  				     sizeof(*rx_chn->flows), GFP_KERNEL);
1034  	if (!rx_chn->flows) {
1035  		ret = -ENOMEM;
1036  		goto err;
1037  	}
1038  
1039  	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
1040  	if (ret)
1041  		goto err;
1042  
1043  	for (i = 0; i < rx_chn->flow_num; i++)
1044  		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
1045  
1046  	/* request and cfg psi-l */
1047  	rx_chn->common.dst_thread =
1048  			xudma_dev_get_psil_base(rx_chn->common.udmax) +
1049  			rx_chn->udma_rchan_id;
1050  
1051  	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
1052  	if (ret) {
1053  		dev_err(dev, "Failed to cfg rchan %d\n", ret);
1054  		goto err;
1055  	}
1056  
1057  	/* init default RX flow only if flow_num = 1 */
1058  	if (cfg->def_flow_cfg) {
1059  		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
1060  		if (ret)
1061  			goto err;
1062  	}
1063  
1064  	k3_udma_glue_dump_rx_chn(rx_chn);
1065  
1066  	return rx_chn;
1067  
1068  err:
1069  	k3_udma_glue_release_rx_chn(rx_chn);
1070  	return ERR_PTR(ret);
1071  }
1072  
1073  static int
1074  k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
1075  					  struct k3_udma_glue_rx_channel_cfg *cfg,
1076  					  struct device *dev)
1077  {
1078  	int ret, i;
1079  
1080  	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
1081  						rx_chn->common.psdata_size,
1082  						rx_chn->common.swdata_size);
1083  
1084  	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
1085  				     sizeof(*rx_chn->flows), GFP_KERNEL);
1086  	if (!rx_chn->flows)
1087  		return -ENOMEM;
1088  
1089  	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
1090  	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
1091  	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
1092  		     rx_chn->common.src_thread, rx_chn->flow_id_base);
1093  	ret = device_register(&rx_chn->common.chan_dev);
1094  	if (ret) {
1095  		dev_err(dev, "Channel Device registration failed %d\n", ret);
1096  		put_device(&rx_chn->common.chan_dev);
1097  		rx_chn->common.chan_dev.parent = NULL;
1098  		return ret;
1099  	}
1100  
1101  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1102  		/* prepare the channel device as coherent */
1103  		rx_chn->common.chan_dev.dma_coherent = true;
1104  		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
1105  					     DMA_BIT_MASK(48));
1106  	}
1107  
1108  	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
1109  	if (ret)
1110  		return ret;
1111  
1112  	for (i = 0; i < rx_chn->flow_num; i++)
1113  		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
1114  
1115  	k3_udma_glue_dump_rx_chn(rx_chn);
1116  
1117  	return 0;
1118  }
1119  
1120  static struct k3_udma_glue_rx_channel *
1121  k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
1122  				   struct k3_udma_glue_rx_channel_cfg *cfg)
1123  {
1124  	struct k3_udma_glue_rx_channel *rx_chn;
1125  	int ret;
1126  
1127  	if (cfg->flow_id_num <= 0 ||
1128  	    cfg->flow_id_use_rxchan_id ||
1129  	    cfg->def_flow_cfg ||
1130  	    cfg->flow_id_base < 0)
1131  		return ERR_PTR(-EINVAL);
1132  
1133  	/*
1134  	 * The remote RX channel is under control of a remote CPU core, so
1135  	 * Linux can only request it and manipulate it via dedicated RX flows.
1136  	 */
1137  
1138  	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
1139  	if (!rx_chn)
1140  		return ERR_PTR(-ENOMEM);
1141  
1142  	rx_chn->common.dev = dev;
1143  	rx_chn->common.swdata_size = cfg->swdata_size;
1144  	rx_chn->remote = true;
1145  	rx_chn->udma_rchan_id = -1;
1146  	rx_chn->flow_num = cfg->flow_id_num;
1147  	rx_chn->flow_id_base = cfg->flow_id_base;
1148  	rx_chn->psil_paired = false;
1149  
1150  	/* parse of udmap channel */
1151  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
1152  					&rx_chn->common, false);
1153  	if (ret)
1154  		goto err;
1155  
1156  	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
1157  	if (ret)
1158  		goto err;
1159  
1160  	return rx_chn;
1161  
1162  err:
1163  	k3_udma_glue_release_rx_chn(rx_chn);
1164  	return ERR_PTR(ret);
1165  }
1166  
1167  struct k3_udma_glue_rx_channel *
1168  k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
1169  						 struct k3_udma_glue_rx_channel_cfg *cfg,
1170  						 struct device_node *udmax_np, u32 thread_id)
1171  {
1172  	struct k3_udma_glue_rx_channel *rx_chn;
1173  	int ret;
1174  
1175  	if (cfg->flow_id_num <= 0 ||
1176  	    cfg->flow_id_use_rxchan_id ||
1177  	    cfg->def_flow_cfg ||
1178  	    cfg->flow_id_base < 0)
1179  		return ERR_PTR(-EINVAL);
1180  
1181  	/*
1182  	 * The remote RX channel is under control of a remote CPU core, so
1183  	 * Linux can only request it and manipulate it via dedicated RX flows.
1184  	 */
1185  
1186  	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
1187  	if (!rx_chn)
1188  		return ERR_PTR(-ENOMEM);
1189  
1190  	rx_chn->common.dev = dev;
1191  	rx_chn->common.swdata_size = cfg->swdata_size;
1192  	rx_chn->remote = true;
1193  	rx_chn->udma_rchan_id = -1;
1194  	rx_chn->flow_num = cfg->flow_id_num;
1195  	rx_chn->flow_id_base = cfg->flow_id_base;
1196  	rx_chn->psil_paired = false;
1197  
1198  	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
1199  	if (ret)
1200  		goto err;
1201  
1202  	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
1203  	if (ret)
1204  		goto err;
1205  
1206  	return rx_chn;
1207  
1208  err:
1209  	k3_udma_glue_release_rx_chn(rx_chn);
1210  	return ERR_PTR(ret);
1211  }
1212  EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
1213  
1214  struct k3_udma_glue_rx_channel *
1215  k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
1216  			    struct k3_udma_glue_rx_channel_cfg *cfg)
1217  {
1218  	if (cfg->remote)
1219  		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
1220  	else
1221  		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
1222  }
1223  EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
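
/*
 * Illustrative usage sketch (not part of this driver; the "rx0" dma-name,
 * ring sizes and client-side variable names are hypothetical). A host-owned
 * RX channel with a single default flow is typically set up as below; with
 * flow_id_num > 1 and no def_flow_cfg, each flow is configured afterwards
 * via k3_udma_glue_rx_flow_init():
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	flow_cfg.rx_cfg.size = 512;
 *	flow_cfg.rxfdq_cfg.size = 512;
 *	flow_cfg.ring_rxq_id = -1;
 *	flow_cfg.ring_rxfdq0_id = -1;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_num = 1;
 *	cfg.flow_id_base = -1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *	ret = k3_udma_glue_enable_rx_chn(rx_chn);
 */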
1224  
1225  void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1226  {
1227  	int i;
1228  
1229  	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
1230  		return;
1231  
1232  	if (rx_chn->psil_paired) {
1233  		xudma_navss_psil_unpair(rx_chn->common.udmax,
1234  					rx_chn->common.src_thread,
1235  					rx_chn->common.dst_thread);
1236  		rx_chn->psil_paired = false;
1237  	}
1238  
1239  	for (i = 0; i < rx_chn->flow_num; i++)
1240  		k3_udma_glue_release_rx_flow(rx_chn, i);
1241  
1242  	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
1243  		xudma_free_gp_rflow_range(rx_chn->common.udmax,
1244  					  rx_chn->flow_id_base,
1245  					  rx_chn->flow_num);
1246  
1247  	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
1248  		xudma_rchan_put(rx_chn->common.udmax,
1249  				rx_chn->udma_rchanx);
1250  
1251  	if (rx_chn->common.chan_dev.parent) {
1252  		device_unregister(&rx_chn->common.chan_dev);
1253  		rx_chn->common.chan_dev.parent = NULL;
1254  	}
1255  }
1256  EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
1257  
1258  int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
1259  			      u32 flow_idx,
1260  			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
1261  {
1262  	if (flow_idx >= rx_chn->flow_num)
1263  		return -EINVAL;
1264  
1265  	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
1266  }
1267  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
1268  
1269  u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
1270  				    u32 flow_idx)
1271  {
1272  	struct k3_udma_glue_rx_flow *flow;
1273  
1274  	if (flow_idx >= rx_chn->flow_num)
1275  		return -EINVAL;
1276  
1277  	flow = &rx_chn->flows[flow_idx];
1278  
1279  	return k3_ringacc_get_ring_id(flow->ringrxfdq);
1280  }
1281  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
1282  
1283  u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
1284  {
1285  	return rx_chn->flow_id_base;
1286  }
1287  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
1288  
1289  int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
1290  				u32 flow_idx)
1291  {
1292  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1293  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1294  	struct device *dev = rx_chn->common.dev;
1295  	struct ti_sci_msg_rm_udmap_flow_cfg req;
1296  	int rx_ring_id;
1297  	int rx_ringfdq_id;
1298  	int ret = 0;
1299  
1300  	if (!rx_chn->remote)
1301  		return -EINVAL;
1302  
1303  	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
1304  	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
1305  
1306  	memset(&req, 0, sizeof(req));
1307  
1308  	req.valid_params =
1309  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1310  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1311  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1312  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1313  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1314  	req.nav_id = tisci_rm->tisci_dev_id;
1315  	req.flow_index = flow->udma_rflow_id;
1316  	req.rx_dest_qnum = rx_ring_id;
1317  	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
1318  	req.rx_fdq1_qnum = rx_ringfdq_id;
1319  	req.rx_fdq2_qnum = rx_ringfdq_id;
1320  	req.rx_fdq3_qnum = rx_ringfdq_id;
1321  
1322  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1323  	if (ret) {
1324  		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
1325  			ret);
1326  	}
1327  
1328  	return ret;
1329  }
1330  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
1331  
1332  int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
1333  				 u32 flow_idx)
1334  {
1335  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1336  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1337  	struct device *dev = rx_chn->common.dev;
1338  	struct ti_sci_msg_rm_udmap_flow_cfg req;
1339  	int ret = 0;
1340  
1341  	if (!rx_chn->remote)
1342  		return -EINVAL;
1343  
1344  	memset(&req, 0, sizeof(req));
1345  	req.valid_params =
1346  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1347  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1348  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1349  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1350  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1351  	req.nav_id = tisci_rm->tisci_dev_id;
1352  	req.flow_index = flow->udma_rflow_id;
1353  	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
1354  	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
1355  	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
1356  	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
1357  	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
1358  
1359  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1360  	if (ret) {
1361  		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
1362  			ret);
1363  	}
1364  
1365  	return ret;
1366  }
1367  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
1368  
1369  int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1370  {
1371  	int ret;
1372  
1373  	if (rx_chn->remote)
1374  		return -EINVAL;
1375  
1376  	if (rx_chn->flows_ready < rx_chn->flow_num)
1377  		return -EINVAL;
1378  
1379  	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
1380  				    rx_chn->common.src_thread,
1381  				    rx_chn->common.dst_thread);
1382  	if (ret) {
1383  		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
1384  		return ret;
1385  	}
1386  
1387  	rx_chn->psil_paired = true;
1388  
1389  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
1390  			    UDMA_CHAN_RT_CTL_EN);
1391  
1392  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1393  			    UDMA_PEER_RT_EN_ENABLE);
1394  
1395  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
1396  	return 0;
1397  }
1398  EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
1399  
1400  void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1401  {
1402  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
1403  
1404  	xudma_rchanrt_write(rx_chn->udma_rchanx,
1405  			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
1406  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
1407  
1408  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
1409  
1410  	if (rx_chn->psil_paired) {
1411  		xudma_navss_psil_unpair(rx_chn->common.udmax,
1412  					rx_chn->common.src_thread,
1413  					rx_chn->common.dst_thread);
1414  		rx_chn->psil_paired = false;
1415  	}
1416  }
1417  EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
1418  
1419  void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1420  			       bool sync)
1421  {
1422  	int i = 0;
1423  	u32 val;
1424  
1425  	if (rx_chn->remote)
1426  		return;
1427  
1428  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
1429  
1430  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1431  			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
1432  
1433  	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);
1434  
1435  	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
1436  		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1437  					 UDMA_CHAN_RT_CTL_REG);
1438  		udelay(1);
1439  		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
1440  			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
1441  			break;
1442  		}
1443  		i++;
1444  	}
1445  
1446  	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1447  				 UDMA_CHAN_RT_PEER_RT_EN_REG);
1448  	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
1449  		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
1450  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
1451  }
1452  EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
1453  
1454  void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1455  		u32 flow_num, void *data,
1456  		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
1457  {
1458  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1459  	struct device *dev = rx_chn->common.dev;
1460  	dma_addr_t desc_dma;
1461  	int occ_rx, i, ret;
1462  
1463  	/* reset RXCQ as it is not input for udma - expected to be empty */
1464  	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
1465  	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
1466  
1467  	/* Skip RX FDQ in case one FDQ is used for the set of flows */
1468  	if (skip_fdq)
1469  		goto do_reset;
1470  
1471  	/*
1472  	 * RX FDQ reset needs to be done in a special way as it is input for udma
1473  	 * and its state is cached by udma, so:
1474  	 * 1) save RX FDQ occ
1475  	 * 2) clean up RX FDQ and call callback .cleanup() for each desc
1476  	 * 3) reset RX FDQ in a special way
1477  	 */
1478  	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
1479  	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
1480  
1481  	for (i = 0; i < occ_rx; i++) {
1482  		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
1483  		if (ret) {
1484  			if (ret != -ENODATA)
1485  				dev_err(dev, "RX reset pop %d\n", ret);
1486  			break;
1487  		}
1488  		cleanup(data, desc_dma);
1489  	}
1490  
1491  	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
1492  
1493  do_reset:
1494  	k3_ringacc_ring_reset(flow->ringrx);
1495  }
1496  EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
1497  
1498  int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1499  			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
1500  			     dma_addr_t desc_dma)
1501  {
1502  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1503  
1504  	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
1505  }
1506  EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
1507  
1508  int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1509  			    u32 flow_num, dma_addr_t *desc_dma)
1510  {
1511  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1512  
1513  	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
1514  }
1515  EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
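
/*
 * Illustrative RX buffer cycle (sketch; desc_rx/desc_dma are client-side and
 * hypothetical): free-buffer descriptors are pushed to the flow's FDQ ring,
 * and completed packets are popped from the flow's RX ring, typically from
 * the RX interrupt/NAPI handler:
 *
 *	ret = k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma);
 *	...
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma)) {
 *		// process the packet, then refill the FDQ with a fresh buffer
 *	}
 */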
1516  
1517  int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
1518  			    u32 flow_num)
1519  {
1520  	struct k3_udma_glue_rx_flow *flow;
1521  
1522  	flow = &rx_chn->flows[flow_num];
1523  
1524  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1525  		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
1526  							flow->udma_rflow_id);
1527  	} else {
1528  		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
1529  	}
1530  
1531  	if (!flow->virq)
1532  		return -ENXIO;
1533  
1534  	return flow->virq;
1535  }
1536  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
1537  
1538  struct device *
1539  	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
1540  {
1541  	if (xudma_is_pktdma(rx_chn->common.udmax) &&
1542  	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
1543  		return &rx_chn->common.chan_dev;
1544  
1545  	return xudma_get_device(rx_chn->common.udmax);
1546  }
1547  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
1548  
1549  void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
1550  				       dma_addr_t *addr)
1551  {
1552  	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1553  	    !rx_chn->common.atype_asel)
1554  		return;
1555  
1556  	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
1557  }
1558  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
1559  
1560  void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
1561  				       dma_addr_t *addr)
1562  {
1563  	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1564  	    !rx_chn->common.atype_asel)
1565  		return;
1566  
1567  	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
1568  }
1569  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
1570  
1571  static int __init k3_udma_glue_class_init(void)
1572  {
1573  	return class_register(&k3_udma_glue_devclass);
1574  }
1575  
1576  module_init(k3_udma_glue_class_init);
1577  MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
1578  MODULE_LICENSE("GPL v2");
1579