/* xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8) */
1 /*
2  * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_ipa.h"
32 
33 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
34 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
35 
36 /**
37  * dp_tx_ipa_uc_detach - Free autonomy TX resources
38  * @soc: data path instance
39  * @pdev: core txrx pdev context
40  *
41  * Free allocated TX buffers with WBM SRNG
42  *
43  * Return: none
44  */
45 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
46 {
47 	int idx;
48 
49 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
50 		if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]) {
51 			qdf_nbuf_free((qdf_nbuf_t)
52 				      (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]));
53 
54 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
55 							(void *)NULL;
56 		}
57 	}
58 
59 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
60 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
61 }
62 
/**
 * dp_rx_ipa_uc_detach - free autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX into main device context
 * will free DP Rx resources.
 *
 * Intentionally empty: the RX path currently allocates no dedicated
 * resources in dp_rx_ipa_uc_attach(), so there is nothing to free.
 * Kept as a placeholder so attach/detach stay symmetric.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
76 
77 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
78 {
79 	/* TX resource detach */
80 	dp_tx_ipa_uc_detach(soc, pdev);
81 
82 	/* RX resource detach */
83 	dp_rx_ipa_uc_detach(soc, pdev);
84 
85 	return QDF_STATUS_SUCCESS;	/* success */
86 }
87 
/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffer from non-cacheable memory
 * Attache allocated TX buffers with WBM SRNG
 *
 * The WBM2IPA completion ring is pre-populated with freshly allocated,
 * DMA-mapped nbufs so that, from IPA's point of view, the ring behaves
 * like a hardware source ring with its head pointer already advanced.
 *
 * Return: QDF_STATUS_SUCCESS on success, -ENOMEM if the pointer pool
 *         cannot be allocated or no buffer at all could be posted
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	/* extra slack so the buffer start can be aligned to ring_base_align */
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params);
	num_entries = srng_params.num_entries;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: requested %d buffers to be posted to wbm ring",
		   __func__, num_entries);

	/* Pointer pool tracking every nbuf posted, freed in uc_detach */
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: IPA WBM Ring Tx buf pool vaddr alloc fail",
			  __func__);
		return -ENOMEM;
	}

	hal_srng_access_start(soc->hal_soc, (void *)wbm_srng);

	/*
	 * Allocate Tx buffers as many as possible
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	/* num_entries - 1: one slot is kept empty to distinguish full/empty */
	for (tx_buffer_count = 0;
		tx_buffer_count < num_entries - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				(void *)wbm_srng);
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		/*
		 * NOTE(review): qdf_nbuf_map_single() return value is not
		 * checked; a mapping failure would post a bogus physical
		 * address to the ring — confirm whether a check is needed.
		 */
		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		/* Split the 37-bit DMA address into low/high ring fields */
		paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
				      HAL_WBM_SW0_BM_ID));

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;
	}

	/* Commits the advanced head pointer to hardware */
	hal_srng_access_end(soc->hal_soc, wbm_srng);

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: IPA WDI TX buffer: %d allocated",
			  __func__, tx_buffer_count);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: No IPA WDI TX buffer allocated",
			  __func__);
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
195 
/**
 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Intentionally a no-op today: the RX rings used by IPA are set up
 * elsewhere (see dp_ipa_ring_resource_setup()), so no extra
 * allocation is required here.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
211 
212 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
213 {
214 	int error;
215 
216 	/* TX resource attach */
217 	error = dp_tx_ipa_uc_attach(soc, pdev);
218 	if (error) {
219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
220 			  "%s: DP IPA UC TX attach fail code %d",
221 			  __func__, error);
222 		return error;
223 	}
224 
225 	/* RX resource attach */
226 	error = dp_rx_ipa_uc_attach(soc, pdev);
227 	if (error) {
228 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
229 			  "%s: DP IPA UC RX attach fail code %d",
230 			  __func__, error);
231 		dp_tx_ipa_uc_detach(soc, pdev);
232 		return error;
233 	}
234 
235 	return QDF_STATUS_SUCCESS;	/* success */
236 }
237 
/**
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: core txrx pdev context
 *
 * Records base/size and head/tail pointer physical addresses of the
 * four rings shared with IPA (TCL data, WBM Tx-completion, REO dest,
 * and RX refill) into soc->ipa_uc_{tx,rx}_rsc for later use by
 * dp_ipa_get_resource()/dp_ipa_setup().
 *
 * Return: 0 always
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* size in bytes: entries * entry_size 32-bit words, << 2 for bytes */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	/* offset of the HP register within the device register window */
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: addr_offset=%x, dev_base_paddr=%x, ipa_tcl_hp_paddr=%x",
		__func__, (unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr));

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* WBM is a destination ring from SW view: record its TP register */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x",
		__func__, (unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr));

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: addr_offset=%x, dev_base_paddr=%x, ipa_reo_tp_paddr=%x",
		__func__, (unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr));

	/* RX refill ring: HP lives in host memory, not a device register */
	hal_srng = pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: ipa_rx_refill_buf_hp_paddr=%x", __func__,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr));

	return 0;
}
340 
341 /**
342  * dp_ipa_uc_get_resource() - Client request resource information
343  * @ppdev - handle to the device instance
344  *
345  *  IPA client will request IPA UC related resource information
346  *  Resource information will be distributed to IPA module
347  *  All of the required resources should be pre-allocated
348  *
349  * Return: QDF_STATUS
350  */
351 QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
352 {
353 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
354 	struct dp_soc *soc = pdev->soc;
355 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
356 
357 	ipa_res->tx_ring_base_paddr =
358 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
359 	ipa_res->tx_ring_size =
360 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
361 	ipa_res->tx_num_alloc_buffer =
362 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
363 
364 	ipa_res->tx_comp_ring_base_paddr =
365 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
366 	ipa_res->tx_comp_ring_size =
367 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;
368 
369 	ipa_res->rx_rdy_ring_base_paddr =
370 		soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
371 	ipa_res->rx_rdy_ring_size =
372 		soc->ipa_uc_rx_rsc.ipa_reo_ring_size;
373 
374 	ipa_res->rx_refill_ring_base_paddr =
375 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
376 	ipa_res->rx_refill_ring_size =
377 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;
378 
379 	if ((0 == ipa_res->tx_comp_ring_base_paddr) ||
380 			(0 == ipa_res->rx_rdy_ring_base_paddr))
381 		return QDF_STATUS_E_FAILURE;
382 
383 	return QDF_STATUS_SUCCESS;
384 }
385 
/**
 * dp_ipa_set_doorbell_paddr () - Set doorbell register physical address to SRNG
 * @ppdev - handle to the device instance
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * After this, hardware writes ring head-pointer updates straight to the
 * IPA uC doorbell registers obtained from qdf_ipa_wdi3_conn_pipes()
 * (stored in pdev->ipa_resource by dp_ipa_setup()).
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct hal_srng *wbm_srng =
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng =
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	/* Point WBM HP writes at the IPA Tx-completion doorbell */
	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
	/* Sync the initial HP value through the doorbell's virtual address */
	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
	/* Point REO HP writes at the IPA Rx-ready doorbell */
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);

	return QDF_STATUS_SUCCESS;
}
411 
412 /**
413  * dp_ipa_op_response() - Handle OP command response from firmware
414  * @ppdev - handle to the device instance
415  * @op_msg: op response message from firmware
416  *
417  * Return: none
418  */
419 QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
420 {
421 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
422 
423 	if (pdev->ipa_uc_op_cb) {
424 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
425 	} else {
426 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
427 		    "%s: IPA callback function is not registered", __func__);
428 		qdf_mem_free(op_msg);
429 		return QDF_STATUS_E_FAILURE;
430 	}
431 
432 	return QDF_STATUS_SUCCESS;
433 }
434 
435 /**
436  * dp_ipa_register_op_cb() - Register OP handler function
437  * @ppdev - handle to the device instance
438  * @op_cb: handler function pointer
439  *
440  * Return: none
441  */
442 QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
443 				 ipa_uc_op_cb_type op_cb,
444 				 void *usr_ctxt)
445 {
446 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
447 
448 	pdev->ipa_uc_op_cb = op_cb;
449 	pdev->usr_ctxt = usr_ctxt;
450 
451 	return QDF_STATUS_SUCCESS;
452 }
453 
/**
 * dp_ipa_get_stat() - Get firmware wdi status
 * @ppdev - handle to the device instance
 *
 * Not implemented yet; kept so the cdp ops table has a valid entry.
 *
 * Return: QDF_STATUS_SUCCESS always (stub)
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
465 
466 /**
467  * dp_tx_send_ipa_data_frame() - send IPA data frame
468  * @vdev: vdev
469  * @skb: skb
470  *
471  * Return: skb/ NULL is for success
472  */
473 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
474 {
475 	qdf_nbuf_t ret;
476 
477 	/* Terminate the (single-element) list of tx frames */
478 	qdf_nbuf_set_next(skb, NULL);
479 	ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
480 	if (ret) {
481 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
482 			  "%s: Failed to tx", __func__);
483 		return ret;
484 	}
485 
486 	return NULL;
487 }
488 
489 /**
490  * dp_ipa_enable_autonomy() – Enable autonomy RX path
491  * @pdev - handle to the device instance
492  *
493  * Set all RX packet route to IPA REO ring
494  * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
495  * Return: none
496  */
497 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
498 {
499 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
500 	struct dp_soc *soc = pdev->soc;
501 	uint32_t remap_val;
502 
503 	/* Call HAL API to remap REO rings to REO2IPA ring */
504 	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
505 		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
506 		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
507 		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
508 		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
509 		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
510 		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
511 		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
512 	hal_reo_remap_IX0(soc->hal_soc, remap_val);
513 
514 	return QDF_STATUS_SUCCESS;
515 }
516 
517 /**
518  * dp_ipa_disable_autonomy() – Disable autonomy RX path
519  * @ppdev - handle to the device instance
520  *
521  * Disable RX packet routing to IPA REO
522  * Program Destination_Ring_Ctrl_IX_0 REO register to disable
523  * Return: none
524  */
525 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
526 {
527 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
528 	struct dp_soc *soc = pdev->soc;
529 	uint32_t remap_val;
530 
531 	/* Call HAL API to remap REO rings to REO2IPA ring */
532 	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
533 		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
534 		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
535 		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
536 		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
537 		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
538 		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
539 		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
540 	hal_reo_remap_IX0(soc->hal_soc, remap_val);
541 
542 	return QDF_STATUS_SUCCESS;
543 }
544 
545 /* This should be configurable per H/W configuration enable status */
546 #define L3_HEADER_PADDING	2
547 
548 /**
549  * dp_ipa_setup() - Setup and connect IPA pipes
550  * @ppdev - handle to the device instance
551  * @ipa_i2w_cb: IPA to WLAN callback
552  * @ipa_w2i_cb: WLAN to IPA callback
553  * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
554  * @ipa_desc_size: IPA descriptor size
555  * @ipa_priv: handle to the HTT instance
556  * @is_rm_enabled: Is IPA RM enabled or not
557  * @tx_pipe_handle: pointer to Tx pipe handle
558  * @rx_pipe_handle: pointer to Rx pipe handle
559  *
560  * Return: QDF_STATUS
561  */
562 QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
563 			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
564 			uint32_t ipa_desc_size, void *ipa_priv,
565 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
566 			uint32_t *rx_pipe_handle)
567 {
568 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
569 	struct dp_soc *soc = pdev->soc;
570 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
571 	qdf_ipa_wdi3_setup_info_t *tx;
572 	qdf_ipa_wdi3_setup_info_t *rx;
573 	qdf_ipa_wdi3_conn_in_params_t pipe_in;
574 	qdf_ipa_wdi3_conn_out_params_t pipe_out;
575 	struct tcl_data_cmd *tcl_desc_ptr;
576 	uint8_t *desc_addr;
577 	uint32_t desc_size;
578 	int ret;
579 
580 	qdf_mem_zero(&tx, sizeof(struct ipa_wdi3_setup_info));
581 	qdf_mem_zero(&rx, sizeof(struct ipa_wdi3_setup_info));
582 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
583 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
584 
585 	/* TX PIPE */
586 	/**
587 	 * Transfer Ring: WBM Ring
588 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
589 	 * Event Ring: TCL ring
590 	 * Event Ring Doorbell PA: TCL Head Pointer Address
591 	 */
592 	tx = &QDF_IPA_WDI3_CONN_IN_PARAMS_TX(&pipe_in);
593 	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
594 	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
595 	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
596 	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
597 	QDF_IPA_WDI3_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
598 	QDF_IPA_WDI3_SETUP_INFO_MODE(tx) = IPA_BASIC;
599 	QDF_IPA_WDI3_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
600 	QDF_IPA_WDI3_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
601 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
602 		ipa_res->tx_comp_ring_base_paddr;
603 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
604 		ipa_res->tx_comp_ring_size;
605 	/* WBM Tail Pointer Address */
606 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
607 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
608 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
609 		ipa_res->tx_ring_base_paddr;
610 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
611 	/* TCL Head Pointer Address */
612 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
613 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
614 	QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
615 		ipa_res->tx_num_alloc_buffer;
616 	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(tx) = 0;
617 
618 	/* Preprogram TCL descriptor */
619 	desc_addr =
620 		(uint8_t *)QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
621 	desc_size = sizeof(struct tcl_data_cmd);
622 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
623 	tcl_desc_ptr = (struct tcl_data_cmd *)
624 		(QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
625 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
626 						HAL_RX_BUF_RBM_SW2_BM;
627 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
628 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
629 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
630 
631 	/* RX PIPE */
632 	/**
633 	 * Transfer Ring: REO Ring
634 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
635 	 * Event Ring: FW ring
636 	 * Event Ring Doorbell PA: FW Head Pointer Address
637 	 */
638 	rx = &QDF_IPA_WDI3_CONN_IN_PARAMS_RX(&pipe_in);
639 	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
640 	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
641 	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
642 	QDF_IPA_WDI3_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
643 	QDF_IPA_WDI3_SETUP_INFO_MODE(rx) = IPA_BASIC;
644 	QDF_IPA_WDI3_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
645 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = ipa_res->rx_rdy_ring_base_paddr;
646 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx) = ipa_res->rx_rdy_ring_size;
647 	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = /* REO Tail Pointer Address */
648 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
649 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx) = ipa_res->rx_refill_ring_base_paddr;
650 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx) = ipa_res->rx_refill_ring_size;
651 	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = /* FW Head Pointer Address */
652 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
653 	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
654 
655 	QDF_IPA_WDI3_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
656 	QDF_IPA_WDI3_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
657 
658 	/* Connect WDI IPA PIPE */
659 	ret = qdf_ipa_wdi3_conn_pipes(&pipe_in, &pipe_out);
660 	if (ret) {
661 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
662 			  "%s: ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d",
663 			  __func__, ret);
664 		return QDF_STATUS_E_FAILURE;
665 	}
666 
667 	/* IPA uC Doorbell registers */
668 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
669 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
670 		  __func__,
671 		(unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
672 		(unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
673 
674 	ipa_res->tx_comp_doorbell_paddr =
675 		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
676 	ipa_res->tx_comp_doorbell_vaddr =
677 		QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
678 	ipa_res->rx_ready_doorbell_paddr =
679 		QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
680 
681 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
682 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
683 		  __func__,
684 		  "transfer_ring_base_pa",
685 		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
686 		  "transfer_ring_size",
687 		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx),
688 		  "transfer_ring_doorbell_pa",
689 		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
690 		  "event_ring_base_pa",
691 		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx),
692 		  "event_ring_size",
693 		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx),
694 		  "event_ring_doorbell_pa",
695 		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
696 		  "num_pkt_buffers",
697 		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx),
698 		  "tx_comp_doorbell_paddr",
699 		  (void *)ipa_res->tx_comp_doorbell_paddr);
700 
701 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
702 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
703 		  __func__,
704 		  "transfer_ring_base_pa",
705 		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
706 		  "transfer_ring_size",
707 		  QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx),
708 		  "transfer_ring_doorbell_pa",
709 		  (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
710 		  "event_ring_base_pa",
711 		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx),
712 		  "event_ring_size",
713 		  QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx),
714 		  "event_ring_doorbell_pa",
715 		  (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
716 		  "num_pkt_buffers",
717 		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(rx),
718 		  "tx_comp_doorbell_paddr",
719 		  (void *)ipa_res->rx_ready_doorbell_paddr);
720 
721 	return QDF_STATUS_SUCCESS;
722 }
723 
724 /**
725  * dp_ipa_cleanup() - Disconnect IPA pipes
726  * @tx_pipe_handle: Tx pipe handle
727  * @rx_pipe_handle: Rx pipe handle
728  *
729  * Return: QDF_STATUS
730  */
731 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
732 {
733 	int ret;
734 
735 	ret = qdf_ipa_wdi3_disconn_pipes();
736 	if (ret) {
737 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
738 		    "%s: ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
739 		    __func__, ret);
740 		return QDF_STATUS_E_FAILURE;
741 	}
742 
743 	return QDF_STATUS_SUCCESS;
744 }
745 
746 /**
747  * dp_ipa_setup_iface() - Setup IPA header and register interface
748  * @ifname: Interface name
749  * @mac_addr: Interface MAC address
750  * @prod_client: IPA prod client type
751  * @cons_client: IPA cons client type
752  * @session_id: Session ID
753  * @is_ipv6_enabled: Is IPV6 enabled or not
754  *
755  * Return: QDF_STATUS
756  */
757 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
758 			      qdf_ipa_client_type_t prod_client,
759 			      qdf_ipa_client_type_t cons_client,
760 			      uint8_t session_id, bool is_ipv6_enabled)
761 {
762 	qdf_ipa_wdi3_reg_intf_in_params_t in;
763 	qdf_ipa_wdi3_hdr_info_t hdr_info;
764 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
765 	int ret = -EINVAL;
766 
767 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
768 		  "%s: Add Partial hdr: %s, %pM",
769 		  __func__, ifname, mac_addr);
770 
771 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
772 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
773 
774 	/* IPV4 header */
775 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
776 
777 	QDF_IPA_WDI3_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
778 	QDF_IPA_WDI3_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
779 	QDF_IPA_WDI3_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
780 	QDF_IPA_WDI3_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
781 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
782 
783 	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
784 	memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[0]), &hdr_info,
785 		sizeof(qdf_ipa_wdi3_hdr_info_t));
786 	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
787 	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA(&in) =
788 		htonl(session_id << 16);
789 	QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
790 
791 	/* IPV6 header */
792 	if (is_ipv6_enabled) {
793 		uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6);
794 		memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[1]),
795 			&hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
796 	}
797 
798 	ret = qdf_ipa_wdi3_reg_intf(&in);
799 	if (ret) {
800 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
801 		    "%s: ipa_wdi3_reg_intf: register IPA interface falied: ret=%d",
802 		    __func__, ret);
803 		return QDF_STATUS_E_FAILURE;
804 	}
805 
806 	return QDF_STATUS_SUCCESS;
807 }
808 
809 /**
810  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
811  * @ifname: Interface name
812  * @is_ipv6_enabled: Is IPV6 enabled or not
813  *
814  * Return: QDF_STATUS
815  */
816 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
817 {
818 	int ret;
819 
820 	ret = qdf_ipa_wdi3_dereg_intf(ifname);
821 	if (ret) {
822 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
823 			  "%s: ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d",
824 			  __func__, ret);
825 		return QDF_STATUS_E_FAILURE;
826 	}
827 
828 	return QDF_STATUS_SUCCESS;
829 }
830 
831  /**
832  * dp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
833  * @ppdev - handle to the device instance
834  *
835  * Return: QDF_STATUS
836  */
837 QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
838 {
839 	QDF_STATUS result;
840 
841 	result = qdf_ipa_wdi3_enable_pipes();
842 	if (result) {
843 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
844 			  "%s: Enable WDI PIPE fail, code %d",
845 			  __func__, result);
846 		return QDF_STATUS_E_FAILURE;
847 	}
848 
849 	return QDF_STATUS_SUCCESS;
850 }
851 
852 /**
853  * dp_ipa_uc_disable_pipes() – Suspend traffic and disable Tx/Rx pipes
854  * @ppdev - handle to the device instance
855  *
856  * Return: QDF_STATUS
857  */
858 QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
859 {
860 	QDF_STATUS result;
861 
862 	result = qdf_ipa_wdi3_disable_pipes();
863 	if (result) {
864 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
865 			  "%s: Disable WDI PIPE fail, code %d",
866 			  __func__, result);
867 		return QDF_STATUS_E_FAILURE;
868 	}
869 
870 	return QDF_STATUS_SUCCESS;
871 }
872 
873 /**
874  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
875  * @client: Client type
876  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
877  *
878  * Return: QDF_STATUS
879  */
880 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
881 {
882 	qdf_ipa_wdi3_perf_profile_t profile;
883 	QDF_STATUS result;
884 
885 	profile.client = client;
886 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
887 
888 	result = qdf_ipa_wdi3_set_perf_profile(&profile);
889 	if (result) {
890 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
891 			  "%s: ipa_wdi3_set_perf_profile fail, code %d",
892 			  __func__, result);
893 		return QDF_STATUS_E_FAILURE;
894 	}
895 
896 	return QDF_STATUS_SUCCESS;
897 }
898 #endif
899