xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_rx.h"
32 #include "dp_ipa.h"
33 
34 /* Hard coded config parameters until dp_ops_cfg.cfg_attach is implemented */
35 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
36 
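/**
 * __dp_ipa_handle_buf_smmu_mapping() - Create/release IPA SMMU mapping of nbuf
 * @soc: data path instance
 * @nbuf: network buffer
 * @create: true to create the IPA SMMU mapping, false to release it
 *
 * Map or unmap the nbuf data buffer in the IPA SMMU context so that the
 * IPA hardware can access it directly.
 *
 * Return: QDF_STATUS
 */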
37 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
38 						   qdf_nbuf_t nbuf,
39 						   bool create)
40 {
41 	qdf_mem_info_t mem_map_table = {0};
42 
43 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
44 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
45 				 skb_end_pointer(nbuf) - nbuf->data);
46 
47 	if (create)
48 		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
49 	else
50 		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
51 
52 	return QDF_STATUS_SUCCESS;
53 }
54 
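/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - Handle IPA SMMU mapping for Rx buffer
 * @soc: data path instance
 * @nbuf: Rx network buffer
 * @create: true to create the mapping, false to release it
 *
 * The operation is skipped when monitor mode is configured on any pdev,
 * when IPA is disabled, when SMMU S1 translation is not enabled, or when
 * the REO rings are not currently remapped to the IPA ring.
 *
 * Return: QDF_STATUS
 */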
55 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
56 					     qdf_nbuf_t nbuf,
57 					     bool create)
58 {
59 	bool reo_remapped = false;
60 	struct dp_pdev *pdev;
61 	int i;
62 
63 	for (i = 0; i < soc->pdev_count; i++) {
64 		pdev = soc->pdev_list[i];
65 		if (pdev && pdev->monitor_configured)
66 			return QDF_STATUS_SUCCESS;
67 	}
68 
69 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
70 	    !qdf_mem_smmu_s1_enabled(soc->osdev))
71 		return QDF_STATUS_SUCCESS;
72 
73 	qdf_spin_lock_bh(&soc->remap_lock);
74 	reo_remapped = soc->reo_remapped;
75 	qdf_spin_unlock_bh(&soc->remap_lock);
76 
77 	if (!reo_remapped)
78 		return QDF_STATUS_SUCCESS;
79 
80 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
81 }
82 
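/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - Map/unmap all Rx pool buffers
 * @soc: data path instance
 * @pdev: core txrx pdev context
 * @create: true to create the IPA SMMU mappings, false to release them
 *
 * Walk the Rx descriptor pool of the pdev and create or release the IPA
 * SMMU mapping for every in-use, mapped Rx buffer.
 *
 * Return: QDF_STATUS
 */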
83 #ifdef RX_DESC_MULTI_PAGE_ALLOC
84 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
85 							 struct dp_pdev *pdev,
86 							 bool create)
87 {
88 	struct rx_desc_pool *rx_pool;
89 	uint8_t pdev_id;
90 	uint32_t num_desc, page_id, offset, i;
91 	uint16_t num_desc_per_page;
92 	union dp_rx_desc_list_elem_t *rx_desc_elem;
93 	struct dp_rx_desc *rx_desc;
94 	qdf_nbuf_t nbuf;
95 
96 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
97 		return QDF_STATUS_SUCCESS;
98 
99 	pdev_id = pdev->pdev_id;
100 	rx_pool = &soc->rx_desc_buf[pdev_id];
101 
102 	qdf_spin_lock_bh(&rx_pool->lock);
103 	num_desc = rx_pool->pool_size;
104 	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
105 	for (i = 0; i < num_desc; i++) {
106 		page_id = i / num_desc_per_page;
107 		offset = i % num_desc_per_page;
108 		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
109 			break;
110 		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
111 		rx_desc = &rx_desc_elem->rx_desc;
112 		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
113 			continue;
114 		nbuf = rx_desc->nbuf;
115 
116 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
117 	}
118 	qdf_spin_unlock_bh(&rx_pool->lock);
119 
120 	return QDF_STATUS_SUCCESS;
121 }
122 #else
123 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
124 							 struct dp_pdev *pdev,
125 							 bool create)
126 {
127 	struct rx_desc_pool *rx_pool;
128 	uint8_t pdev_id;
129 	qdf_nbuf_t nbuf;
130 	int i;
131 
132 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
133 		return QDF_STATUS_SUCCESS;
134 
135 	pdev_id = pdev->pdev_id;
136 	rx_pool = &soc->rx_desc_buf[pdev_id];
137 
138 	qdf_spin_lock_bh(&rx_pool->lock);
139 	for (i = 0; i < rx_pool->pool_size; i++) {
140 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
141 		    rx_pool->array[i].rx_desc.unmapped)
142 			continue;
143 
144 		nbuf = rx_pool->array[i].rx_desc.nbuf;
145 
146 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
147 	}
148 	qdf_spin_unlock_bh(&rx_pool->lock);
149 
150 	return QDF_STATUS_SUCCESS;
151 }
152 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
153 
154 /**
155  * dp_tx_ipa_uc_detach - Free autonomy TX resources
156  * @soc: data path instance
157  * @pdev: core txrx pdev context
158  *
159  * Free allocated TX buffers with WBM SRNG
160  *
161  * Return: none
162  */
163 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
164 {
165 	int idx;
166 	qdf_nbuf_t nbuf;
167 	struct dp_ipa_resources *ipa_res;
168 
169 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
170 		nbuf = (qdf_nbuf_t)
171 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
172 		if (!nbuf)
173 			continue;
174 
175 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
176 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, false);
177 
178 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
179 		qdf_nbuf_free(nbuf);
180 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
181 						(void *)NULL;
182 	}
183 
184 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
185 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
186 
187 	ipa_res = &pdev->ipa_resource;
188 	iounmap(ipa_res->tx_comp_doorbell_vaddr);
189 
190 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
191 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
192 }
193 
194 /**
195  * dp_rx_ipa_uc_detach - free autonomy RX resources
196  * @soc: data path instance
197  * @pdev: core txrx pdev context
198  *
199  * This function will detach DP RX from the main device context
200  * and free DP RX resources.
201  *
202  * Return: none
203  */
204 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
205 {
206 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
207 
208 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
209 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
210 }
211 
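/**
 * dp_ipa_uc_detach() - Free IPA uC TX and RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS
 */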
212 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
213 {
214 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
215 		return QDF_STATUS_SUCCESS;
216 
217 	/* TX resource detach */
218 	dp_tx_ipa_uc_detach(soc, pdev);
219 
220 	/* RX resource detach */
221 	dp_rx_ipa_uc_detach(soc, pdev);
222 
223 	qdf_spinlock_destroy(&soc->remap_lock);
224 
225 	return QDF_STATUS_SUCCESS;	/* success */
226 }
227 
228 /**
229  * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
230  * @soc: data path instance
231  * @pdev: Physical device handle
232  *
233  * Allocate TX buffers from non-cacheable memory
234  * Attach allocated TX buffers to the WBM SRNG
235  *
236  * Return: int
237  */
238 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
239 {
240 	uint32_t tx_buffer_count;
241 	uint32_t ring_base_align = 8;
242 	qdf_dma_addr_t buffer_paddr;
243 	struct hal_srng *wbm_srng =
244 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
245 	struct hal_srng_params srng_params;
246 	uint32_t paddr_lo;
247 	uint32_t paddr_hi;
248 	void *ring_entry;
249 	int num_entries;
250 	qdf_nbuf_t nbuf;
251 	int retval = QDF_STATUS_SUCCESS;
252 
253 	/*
254 	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
255 	 * unsigned int uc_tx_buf_sz =
256 	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
257 	 */
258 	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
259 	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
260 
261 	hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params);
262 	num_entries = srng_params.num_entries;
263 
264 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
265 		  "%s: requested %d buffers to be posted to wbm ring",
266 		   __func__, num_entries);
267 
268 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
269 		qdf_mem_malloc(num_entries *
270 		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
271 	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
272 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
273 			  "%s: IPA WBM Ring Tx buf pool vaddr alloc fail",
274 			  __func__);
275 		return -ENOMEM;
276 	}
277 
278 	hal_srng_access_start_unlocked(soc->hal_soc, (void *)wbm_srng);
279 
280 	/*
281 	 * Allocate as many Tx buffers as possible and populate them into
282 	 * the WBM2IPA ring. This initial buffer population simulates the
283 	 * H/W acting as the source ring and updates the head pointer (HP)
284 	 * accordingly.
285 	 */
286 	for (tx_buffer_count = 0;
287 		tx_buffer_count < num_entries - 1; tx_buffer_count++) {
288 		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
289 		if (!nbuf)
290 			break;
291 
292 		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
293 				(void *)wbm_srng);
294 		if (!ring_entry) {
295 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
296 				  "%s: Failed to get WBM ring entry",
297 				  __func__);
298 			qdf_nbuf_free(nbuf);
299 			break;
300 		}
301 
302 		qdf_nbuf_map_single(soc->osdev, nbuf,
303 				    QDF_DMA_BIDIRECTIONAL);
304 		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
305 
306 		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
307 		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
308 		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
309 		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
310 		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
311 				      HAL_WBM_SW0_BM_ID));
312 
313 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
314 			= (void *)nbuf;
315 
316 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
317 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, true);
318 	}
319 
320 	hal_srng_access_end_unlocked(soc->hal_soc, wbm_srng);
321 
322 	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
323 
324 	if (tx_buffer_count) {
325 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
326 			  "%s: IPA WDI TX buffer: %d allocated",
327 			  __func__, tx_buffer_count);
328 	} else {
329 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
330 			  "%s: No IPA WDI TX buffer allocated",
331 			  __func__);
332 		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
333 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
334 		retval = -ENOMEM;
335 	}
336 
337 	return retval;
338 }
339 
340 /**
341  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
342  * @soc: data path instance
343  * @pdev: core txrx pdev context
344  *
345  * This function will attach a DP RX instance to the main
346  * device (SOC) context.
347  *
348  * Return: QDF_STATUS_SUCCESS: success
349  *         QDF_STATUS_E_RESOURCES: Error return
350  */
351 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
352 {
353 	return QDF_STATUS_SUCCESS;
354 }
355 
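/**
 * dp_ipa_uc_attach() - Allocate IPA uC TX and RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Create the REO remap lock and allocate the autonomy TX and RX resources.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */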
356 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
357 {
358 	int error;
359 
360 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
361 		return QDF_STATUS_SUCCESS;
362 
363 	qdf_spinlock_create(&soc->remap_lock);
364 
365 	/* TX resource attach */
366 	error = dp_tx_ipa_uc_attach(soc, pdev);
367 	if (error) {
368 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
369 			  "%s: DP IPA UC TX attach fail code %d",
370 			  __func__, error);
371 		return error;
372 	}
373 
374 	/* RX resource attach */
375 	error = dp_rx_ipa_uc_attach(soc, pdev);
376 	if (error) {
377 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
378 			  "%s: DP IPA UC RX attach fail code %d",
379 			  __func__, error);
380 		dp_tx_ipa_uc_detach(soc, pdev);
381 		return error;
382 	}
383 
384 	return QDF_STATUS_SUCCESS;	/* success */
385 }
386 
387 /*
388  * dp_ipa_ring_resource_setup() - setup IPA ring resources
389  * @soc: data path SoC handle
390  * @pdev: core txrx pdev context
391  * Return: 0 on success
392  */
393 int dp_ipa_ring_resource_setup(struct dp_soc *soc,
394 		struct dp_pdev *pdev)
395 {
396 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
397 	struct hal_srng *hal_srng;
398 	struct hal_srng_params srng_params;
399 	qdf_dma_addr_t hp_addr;
400 	unsigned long addr_offset, dev_base_paddr;
401 
402 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
403 		return QDF_STATUS_SUCCESS;
404 
405 	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
406 	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
407 	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
408 
409 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
410 		srng_params.ring_base_paddr;
411 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
412 		srng_params.ring_base_vaddr;
413 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
414 		(srng_params.num_entries * srng_params.entry_size) << 2;
415 	/*
416 	 * For the register backed memory addresses, use the scn->mem_pa to
417 	 * calculate the physical address of the shadow registers
418 	 */
419 	dev_base_paddr =
420 		(unsigned long)
421 		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
422 	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
423 		      (unsigned long)(hal_soc->dev_base_addr);
424 	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
425 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
426 
427 	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
428 		(unsigned int)addr_offset,
429 		(unsigned int)dev_base_paddr,
430 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
431 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
432 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
433 		srng_params.num_entries,
434 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
435 
436 	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
437 	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
438 	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
439 
440 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
441 						srng_params.ring_base_paddr;
442 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
443 						srng_params.ring_base_vaddr;
444 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
445 		(srng_params.num_entries * srng_params.entry_size) << 2;
446 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
447 		      (unsigned long)(hal_soc->dev_base_addr);
448 	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
449 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
450 
451 	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
452 		(unsigned int)addr_offset,
453 		(unsigned int)dev_base_paddr,
454 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
455 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
456 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
457 		srng_params.num_entries,
458 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
459 
460 	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
461 	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
462 	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
463 
464 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
465 						srng_params.ring_base_paddr;
466 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
467 						srng_params.ring_base_vaddr;
468 	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
469 		(srng_params.num_entries * srng_params.entry_size) << 2;
470 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
471 		      (unsigned long)(hal_soc->dev_base_addr);
472 	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
473 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
474 
475 	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
476 		(unsigned int)addr_offset,
477 		(unsigned int)dev_base_paddr,
478 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
479 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
480 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
481 		srng_params.num_entries,
482 		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
483 
484 	hal_srng = pdev->rx_refill_buf_ring2.hal_srng;
485 	hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params);
486 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
487 		srng_params.ring_base_paddr;
488 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
489 		srng_params.ring_base_vaddr;
490 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
491 		(srng_params.num_entries * srng_params.entry_size) << 2;
492 	hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng);
493 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
494 		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);
495 
496 	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
497 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
498 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
499 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
500 		srng_params.num_entries,
501 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
502 
503 	return 0;
504 }
505 
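/**
 * dp_ipa_get_shared_mem_info() - Fill a qdf_shared_mem_t for an IPA ring
 * @osdev: OS device handle
 * @shared_mem: shared memory info to fill
 * @cpu_addr: virtual base address of the ring
 * @dma_addr: DMA address of the ring
 * @size: ring size in bytes
 *
 * Populate the memory info and build the scatter-gather table that is
 * later passed to the IPA driver for SMMU-enabled targets.
 *
 * Return: QDF_STATUS
 */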
506 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
507 					     qdf_shared_mem_t *shared_mem,
508 					     void *cpu_addr,
509 					     qdf_dma_addr_t dma_addr,
510 					     uint32_t size)
511 {
512 	qdf_dma_addr_t paddr;
513 	int ret;
514 
515 	shared_mem->vaddr = cpu_addr;
516 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
517 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
518 
519 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
520 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
521 
522 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
523 				      shared_mem->vaddr, dma_addr, size);
524 	if (ret) {
525 		dp_err("Unable to get DMA sgtable");
526 		return QDF_STATUS_E_NOMEM;
527 	}
528 
529 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
530 
531 	return QDF_STATUS_SUCCESS;
532 }
533 
534 /**
535  * dp_ipa_get_resource() - Client request resource information
536  * @ppdev: handle to the device instance
537  *
538  * IPA client will request IPA UC related resource information.
539  * Resource information will be distributed to the IPA module.
540  * All of the required resources should be pre-allocated.
541  *
542  * Return: QDF_STATUS
543  */
544 QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
545 {
546 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
547 	struct dp_soc *soc = pdev->soc;
548 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
549 
550 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
551 		return QDF_STATUS_SUCCESS;
552 
553 	ipa_res->tx_num_alloc_buffer =
554 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
555 
556 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
557 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
558 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
559 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
560 
561 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
562 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
563 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
564 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
565 
566 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
567 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
568 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
569 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
570 
571 	dp_ipa_get_shared_mem_info(
572 			soc->osdev, &ipa_res->rx_refill_ring,
573 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
574 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
575 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
576 
577 	if (!qdf_mem_get_dma_addr(soc->osdev,
578 				  &ipa_res->tx_comp_ring.mem_info) ||
579 	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info))
580 		return QDF_STATUS_E_FAILURE;
581 
582 	return QDF_STATUS_SUCCESS;
583 }
584 
585 /**
586  * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
587  * @ppdev: handle to the device instance
588  *
589  * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
590  * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
591  *
592  * Return: none
593  */
594 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
595 {
596 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
597 	struct dp_soc *soc = pdev->soc;
598 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
599 	struct hal_srng *wbm_srng =
600 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
601 	struct hal_srng *reo_srng =
602 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
603 	uint32_t tx_comp_doorbell_dmaaddr;
604 	uint32_t rx_ready_doorbell_dmaaddr;
605 
606 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
607 		return QDF_STATUS_SUCCESS;
608 
609 	ipa_res->tx_comp_doorbell_vaddr =
610 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
611 
612 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
613 		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
614 			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
615 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
616 
617 		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
618 			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
619 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
620 	}
621 
622 	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
623 
624 	dp_info("paddr %pK vaddr %pK",
625 		(void *)ipa_res->tx_comp_doorbell_paddr,
626 		(void *)ipa_res->tx_comp_doorbell_vaddr);
627 
628 	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
629 
630 	/*
631 	 * For RX, REO module on Napier/Hastings does reordering on incoming
632 	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
633 	 * ring. It then updates the ring's Write/Head ptr and rings a doorbell
634 	 * to IPA.
635 	 * Set the doorbell addr for the REO ring.
636 	 */
637 	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
638 	return QDF_STATUS_SUCCESS;
639 }
640 
641 /**
642  * dp_ipa_op_response() - Handle OP command response from firmware
643  * @ppdev: handle to the device instance
644  * @op_msg: op response message from firmware
645  *
646  * Return: QDF_STATUS
647  */
648 QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
649 {
650 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
651 
652 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
653 		return QDF_STATUS_SUCCESS;
654 
655 	if (pdev->ipa_uc_op_cb) {
656 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
657 	} else {
658 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
659 		    "%s: IPA callback function is not registered", __func__);
660 		qdf_mem_free(op_msg);
661 		return QDF_STATUS_E_FAILURE;
662 	}
663 
664 	return QDF_STATUS_SUCCESS;
665 }
666 
667 /**
668  * dp_ipa_register_op_cb() - Register OP handler function
669  * @ppdev: handle to the device instance
670  * @op_cb: handler function pointer
671  * @usr_ctxt: user context passed back to the callback
672  * Return: QDF_STATUS
673  */
674 QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
675 				 ipa_uc_op_cb_type op_cb,
676 				 void *usr_ctxt)
677 {
678 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
679 
680 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
681 		return QDF_STATUS_SUCCESS;
682 
683 	pdev->ipa_uc_op_cb = op_cb;
684 	pdev->usr_ctxt = usr_ctxt;
685 
686 	return QDF_STATUS_SUCCESS;
687 }
688 
689 /**
690  * dp_ipa_get_stat() - Get firmware wdi status
691  * @ppdev: handle to the device instance
692  *
693  * Return: QDF_STATUS
694  */
695 QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
696 {
697 	/* TBD */
698 	return QDF_STATUS_SUCCESS;
699 }
700 
701 /**
702  * dp_tx_send_ipa_data_frame() - send IPA data frame
703  * @vdev: vdev
704  * @skb: skb
705  *
706  * Return: NULL on success, skb on failure
707  */
708 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
709 {
710 	qdf_nbuf_t ret;
711 
712 	/* Terminate the (single-element) list of tx frames */
713 	qdf_nbuf_set_next(skb, NULL);
714 	ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
715 	if (ret) {
716 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
717 			  "%s: Failed to tx", __func__);
718 		return ret;
719 	}
720 
721 	return NULL;
722 }
723 
724 /**
725  * dp_ipa_enable_autonomy() - Enable autonomy RX path
726  * @ppdev: handle to the device instance
727  *
728  * Route all RX packets to the IPA REO ring by programming the
729  * Destination_Ring_Ctrl_IX_0 REO register to point to the IPA REO ring.
730  * Return: QDF_STATUS
731  */
732 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
733 {
734 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
735 	struct dp_soc *soc = pdev->soc;
736 	uint32_t ix0;
737 	uint32_t ix2;
738 
739 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
740 		return QDF_STATUS_SUCCESS;
741 
742 	qdf_spin_lock_bh(&soc->remap_lock);
743 	soc->reo_remapped = true;
744 	qdf_spin_unlock_bh(&soc->remap_lock);
745 
746 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
747 
748 	/* Call HAL API to remap REO rings to REO2IPA ring */
749 	ix0 = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
750 	      HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
751 	      HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
752 	      HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
753 	      HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
754 	      HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
755 	      HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
756 	      HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
757 
758 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
759 		ix2 = ((REO_REMAP_SW4 << 0) | (REO_REMAP_SW4 << 3) |
760 		       (REO_REMAP_SW4 << 6) | (REO_REMAP_SW4 << 9) |
761 		       (REO_REMAP_SW4 << 12) | (REO_REMAP_SW4 << 15) |
762 		       (REO_REMAP_SW4 << 18) | (REO_REMAP_SW4 << 21)) << 8;
763 
764 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
765 					   &ix2, &ix2);
766 	}
767 
768 	return QDF_STATUS_SUCCESS;
769 }
770 
771 /**
772  * dp_ipa_disable_autonomy() - Disable autonomy RX path
773  * @ppdev: handle to the device instance
774  *
775  * Disable RX packet routing to the IPA REO ring by restoring the
776  * Destination_Ring_Ctrl_IX_0 REO register to its default mapping.
777  * Return: QDF_STATUS
778  */
779 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
780 {
781 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
782 	struct dp_soc *soc = pdev->soc;
783 	uint32_t ix0;
784 	uint32_t ix2;
785 	uint32_t ix3;
786 
787 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
788 		return QDF_STATUS_SUCCESS;
789 
790 	/* Call HAL API to remap REO rings to REO2IPA ring */
791 	ix0 = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
792 	      HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
793 	      HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
794 	      HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
795 	      HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
796 	      HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
797 	      HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
798 	      HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
799 
800 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
801 		dp_reo_remap_config(soc, &ix2, &ix3);
802 
803 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
804 					   &ix2, &ix3);
805 	}
806 
807 	qdf_spin_lock_bh(&soc->remap_lock);
808 	soc->reo_remapped = false;
809 	qdf_spin_unlock_bh(&soc->remap_lock);
810 
811 	return QDF_STATUS_SUCCESS;
812 }
813 
814 /* This should be configurable per H/W configuration enable status */
815 #define L3_HEADER_PADDING	2
816 
817 #ifdef CONFIG_IPA_WDI_UNIFIED_API
818 
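/**
 * dp_setup_mcc_sys_pipes() - Fill MCC sys pipe parameters for IPA WDI connect
 * @sys_in: array of sys pipe parameters for MCC mode
 * @pipe_in: IPA WDI connect-in parameters to update
 *
 * When TX flow control v2 is not used, copy the MCC sys pipe parameters
 * into the WDI connect-in parameters; otherwise no sys pipe is requested.
 */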
819 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
820 static inline void dp_setup_mcc_sys_pipes(
821 		qdf_ipa_sys_connect_params_t *sys_in,
822 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
823 {
824 	/* Setup MCC sys pipe */
825 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
826 			DP_IPA_MAX_IFACE;
827 	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
828 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
829 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
830 }
831 #else
832 static inline void dp_setup_mcc_sys_pipes(
833 		qdf_ipa_sys_connect_params_t *sys_in,
834 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
835 {
836 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
837 }
838 #endif
839 
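/**
 * dp_ipa_wdi_tx_params() - Fill TX pipe setup info for IPA WDI connect
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx: IPA WDI TX pipe setup info to fill
 * @over_gsi: true if the WDI pipes are over GSI
 *
 * The WBM ring is used as the transfer ring (doorbell: WBM tail pointer)
 * and the TCL ring as the event ring (doorbell: TCL head pointer). A TCL
 * data command template is also preprogrammed for IPA.
 */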
840 static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
841 				 struct dp_ipa_resources *ipa_res,
842 				 qdf_ipa_wdi_pipe_setup_info_t *tx,
843 				 bool over_gsi)
844 {
845 	struct tcl_data_cmd *tcl_desc_ptr;
846 	uint8_t *desc_addr;
847 	uint32_t desc_size;
848 
849 	if (over_gsi)
850 		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
851 	else
852 		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
853 
854 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
855 		qdf_mem_get_dma_addr(soc->osdev,
856 				     &ipa_res->tx_comp_ring.mem_info);
857 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
858 		qdf_mem_get_dma_size(soc->osdev,
859 				     &ipa_res->tx_comp_ring.mem_info);
860 
861 	/* WBM Tail Pointer Address */
862 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
863 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
864 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;
865 
866 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
867 		qdf_mem_get_dma_addr(soc->osdev,
868 				     &ipa_res->tx_ring.mem_info);
869 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
870 		qdf_mem_get_dma_size(soc->osdev,
871 				     &ipa_res->tx_ring.mem_info);
872 
873 	/* TCL Head Pointer Address */
874 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
875 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
876 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;
877 
878 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
879 		ipa_res->tx_num_alloc_buffer;
880 
881 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
882 
883 	/* Preprogram TCL descriptor */
884 	desc_addr =
885 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
886 	desc_size = sizeof(struct tcl_data_cmd);
887 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
888 	tcl_desc_ptr = (struct tcl_data_cmd *)
889 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
890 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
891 		HAL_RX_BUF_RBM_SW2_BM;
892 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
893 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
894 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
895 }
896 
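/**
 * dp_ipa_wdi_rx_params() - Fill RX pipe setup info for IPA WDI connect
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @rx: IPA WDI RX pipe setup info to fill
 * @over_gsi: true if the WDI pipes are over GSI
 *
 * The REO ring is used as the transfer ring (doorbell: REO tail pointer)
 * and the FW refill ring as the event ring (doorbell: FW head pointer).
 */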
897 static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
898 				 struct dp_ipa_resources *ipa_res,
899 				 qdf_ipa_wdi_pipe_setup_info_t *rx,
900 				 bool over_gsi)
901 {
902 	if (over_gsi)
903 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
904 					IPA_CLIENT_WLAN2_PROD;
905 	else
906 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
907 					IPA_CLIENT_WLAN1_PROD;
908 
909 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
910 		qdf_mem_get_dma_addr(soc->osdev,
911 				     &ipa_res->rx_rdy_ring.mem_info);
912 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
913 		qdf_mem_get_dma_size(soc->osdev,
914 				     &ipa_res->rx_rdy_ring.mem_info);
915 
916 	/* REO Tail Pointer Address */
917 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
918 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
919 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;
920 
921 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
922 		qdf_mem_get_dma_addr(soc->osdev,
923 				     &ipa_res->rx_refill_ring.mem_info);
924 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
925 		qdf_mem_get_dma_size(soc->osdev,
926 				     &ipa_res->rx_refill_ring.mem_info);
927 
928 	/* FW Head Pointer Address */
929 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
930 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
931 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;
932 
933 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
934 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
935 }
936 
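/**
 * dp_ipa_wdi_tx_smmu_params() - Fill TX pipe setup info for SMMU-enabled WDI
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @tx_smmu: IPA WDI TX SMMU pipe setup info to fill
 * @over_gsi: true if the WDI pipes are over GSI
 *
 * Same as dp_ipa_wdi_tx_params() but passes the ring scatter-gather tables
 * instead of physical addresses when SMMU S1 translation is enabled.
 */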
937 static void
938 dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
939 			  struct dp_ipa_resources *ipa_res,
940 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
941 			  bool over_gsi)
942 {
943 	struct tcl_data_cmd *tcl_desc_ptr;
944 	uint8_t *desc_addr;
945 	uint32_t desc_size;
946 
947 	if (over_gsi)
948 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
949 			IPA_CLIENT_WLAN2_CONS;
950 	else
951 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
952 			IPA_CLIENT_WLAN1_CONS;
953 
954 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
955 		     &ipa_res->tx_comp_ring.sgtable,
956 		     sizeof(sgtable_t));
957 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
958 		qdf_mem_get_dma_size(soc->osdev,
959 				     &ipa_res->tx_comp_ring.mem_info);
960 	/* WBM Tail Pointer Address */
961 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
962 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
963 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;
964 
965 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
966 		     &ipa_res->tx_ring.sgtable,
967 		     sizeof(sgtable_t));
968 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
969 		qdf_mem_get_dma_size(soc->osdev,
970 				     &ipa_res->tx_ring.mem_info);
971 	/* TCL Head Pointer Address */
972 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
973 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
974 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;
975 
976 	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
977 		ipa_res->tx_num_alloc_buffer;
978 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;
979 
980 	/* Preprogram TCL descriptor */
981 	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
982 			tx_smmu);
983 	desc_size = sizeof(struct tcl_data_cmd);
984 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
985 	tcl_desc_ptr = (struct tcl_data_cmd *)
986 		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
987 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
988 		HAL_RX_BUF_RBM_SW2_BM;
989 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
990 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
991 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
992 }
993 
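/**
 * dp_ipa_wdi_rx_smmu_params() - Fill RX pipe setup info for SMMU-enabled WDI
 * @soc: data path SoC handle
 * @ipa_res: IPA resources of the pdev
 * @rx_smmu: IPA WDI RX SMMU pipe setup info to fill
 * @over_gsi: true if the WDI pipes are over GSI
 *
 * Same as dp_ipa_wdi_rx_params() but passes the ring scatter-gather tables
 * instead of physical addresses when SMMU S1 translation is enabled.
 */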
994 static void
995 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
996 			  struct dp_ipa_resources *ipa_res,
997 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
998 			  bool over_gsi)
999 {
1000 	if (over_gsi)
1001 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1002 					IPA_CLIENT_WLAN2_PROD;
1003 	else
1004 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1005 					IPA_CLIENT_WLAN1_PROD;
1006 
1007 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
1008 		     &ipa_res->rx_rdy_ring.sgtable,
1009 		     sizeof(sgtable_t));
1010 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
1011 		qdf_mem_get_dma_size(soc->osdev,
1012 				     &ipa_res->rx_rdy_ring.mem_info);
1013 	/* REO Tail Pointer Address */
1014 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
1015 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1016 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
1017 
1018 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
1019 		     &ipa_res->rx_refill_ring.sgtable,
1020 		     sizeof(sgtable_t));
1021 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
1022 		qdf_mem_get_dma_size(soc->osdev,
1023 				     &ipa_res->rx_refill_ring.mem_info);
1024 
1025 	/* FW Head Pointer Address */
1026 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
1027 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1028 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;
1029 
1030 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
1031 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1032 }
1033 
1034 /**
1035  * dp_ipa_setup() - Setup and connect IPA pipes
1036  * @ppdev: handle to the device instance
1037  * @ipa_i2w_cb: IPA to WLAN callback
1038  * @ipa_w2i_cb: WLAN to IPA callback
1039  * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
1040  * @ipa_desc_size: IPA descriptor size
1041  * @ipa_priv: handle to the HTT instance
1042  * @is_rm_enabled: Is IPA RM enabled or not
1043  * @tx_pipe_handle: pointer to Tx pipe handle
1044  * @rx_pipe_handle: pointer to Rx pipe handle
1045  * @is_smmu_enabled: Is SMMU enabled or not
1046  * @sys_in: parameters to setup sys pipe in mcc mode
1047  * @over_gsi: true if the WDI pipes are over GSI
1048  * Return: QDF_STATUS
1049  */
1050 QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
1051 			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1052 			uint32_t ipa_desc_size, void *ipa_priv,
1053 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1054 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1055 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
1056 {
1057 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1058 	struct dp_soc *soc = pdev->soc;
1059 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1060 	qdf_ipa_ep_cfg_t *tx_cfg;
1061 	qdf_ipa_ep_cfg_t *rx_cfg;
1062 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1063 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1064 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
1065 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
1066 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1067 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1068 	int ret;
1069 
1070 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1071 		return QDF_STATUS_SUCCESS;
1072 
1076 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1077 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1078 
1079 	if (is_smmu_enabled)
1080 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
1081 	else
1082 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
1083 
1084 	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);
1085 
1086 	/* TX PIPE */
1087 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1088 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
1089 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
1090 	} else {
1091 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1092 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
1093 	}
1094 
1095 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
1096 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1097 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
1098 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
1099 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
1100 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
1101 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
1102 
1103 	/**
1104 	 * Transfer Ring: WBM Ring
1105 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1106 	 * Event Ring: TCL ring
1107 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1108 	 */
1109 	if (is_smmu_enabled)
1110 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
1111 	else
1112 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
1113 
1114 	/* RX PIPE */
1115 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1116 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
1117 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
1118 	} else {
1119 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1120 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
1121 	}
1122 
1123 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
1124 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1125 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
1126 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
1127 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
1128 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
1129 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
1130 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
1131 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
1132 
1133 	/**
1134 	 * Transfer Ring: REO Ring
1135 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1136 	 * Event Ring: FW ring
1137 	 * Event Ring Doorbell PA: FW Head Pointer Address
1138 	 */
1139 	if (is_smmu_enabled)
1140 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
1141 	else
1142 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
1143 
1144 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1145 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1146 
1147 	/* Connect WDI IPA PIPEs */
1148 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1149 
1150 	if (ret) {
1151 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1152 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1153 			  __func__, ret);
1154 		return QDF_STATUS_E_FAILURE;
1155 	}
1156 
1157 	/* IPA uC Doorbell registers */
1158 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
1159 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1160 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1161 
1162 	ipa_res->tx_comp_doorbell_paddr =
1163 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1164 	ipa_res->rx_ready_doorbell_paddr =
1165 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1166 
1167 	return QDF_STATUS_SUCCESS;
1168 }
1169 
1170 /**
1171  * dp_ipa_setup_iface() - Setup IPA header and register interface
1172  * @ifname: Interface name
1173  * @mac_addr: Interface MAC address
1174  * @prod_client: IPA prod client type
1175  * @cons_client: IPA cons client type
1176  * @session_id: Session ID
1177  * @is_ipv6_enabled: Is IPV6 enabled or not
1178  *
1179  * Return: QDF_STATUS
1180  */
1181 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1182 			      qdf_ipa_client_type_t prod_client,
1183 			      qdf_ipa_client_type_t cons_client,
1184 			      uint8_t session_id, bool is_ipv6_enabled)
1185 {
1186 	qdf_ipa_wdi_reg_intf_in_params_t in;
1187 	qdf_ipa_wdi_hdr_info_t hdr_info;
1188 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1189 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1190 	int ret = -EINVAL;
1191 
1192 	dp_debug("Add Partial hdr: %s, %pM", ifname, mac_addr);
1193 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1194 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1195 
1196 	/* IPV4 header */
1197 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1198 
1199 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1200 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1201 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1202 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1203 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1204 
1205 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1206 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1207 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1208 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
1209 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1210 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1211 		htonl(session_id << 16);
1212 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1213 
1214 	/* IPV6 header */
1215 	if (is_ipv6_enabled) {
1216 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1217 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1218 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1219 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1220 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1221 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1222 	}
1223 
1224 	dp_debug("registering for session_id: %u", session_id);
1225 
1226 	ret = qdf_ipa_wdi_reg_intf(&in);
1227 
1228 	if (ret) {
1229 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1230 		    "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
1231 		    __func__, ret);
1232 		return QDF_STATUS_E_FAILURE;
1233 	}
1234 
1235 	return QDF_STATUS_SUCCESS;
1236 }
1237 
1238 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1239 
1240 /**
1241  * dp_ipa_setup() - Setup and connect IPA pipes
1242  * @ppdev: handle to the device instance
1243  * @ipa_i2w_cb: IPA to WLAN callback
1244  * @ipa_w2i_cb: WLAN to IPA callback
1245  * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
1246  * @ipa_desc_size: IPA descriptor size
1247  * @ipa_priv: handle to the HTT instance
1248  * @is_rm_enabled: Is IPA RM enabled or not
1249  * @tx_pipe_handle: pointer to Tx pipe handle
1250  * @rx_pipe_handle: pointer to Rx pipe handle
1251  *
1252  * Return: QDF_STATUS
1253  */
1254 QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
1255 			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1256 			uint32_t ipa_desc_size, void *ipa_priv,
1257 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1258 			uint32_t *rx_pipe_handle)
1259 {
1260 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1261 	struct dp_soc *soc = pdev->soc;
1262 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1263 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1264 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1265 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1266 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1267 	struct tcl_data_cmd *tcl_desc_ptr;
1268 	uint8_t *desc_addr;
1269 	uint32_t desc_size;
1270 	int ret;
1271 
1272 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1273 		return QDF_STATUS_SUCCESS;
1274 
1277 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1278 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1279 
1280 	/* TX PIPE */
1281 	/**
1282 	 * Transfer Ring: WBM Ring
1283 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1284 	 * Event Ring: TCL ring
1285 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1286 	 */
1287 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1288 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
1289 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1290 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
1291 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
1292 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
1293 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
1294 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
1295 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
1296 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
1297 		ipa_res->tx_comp_ring_base_paddr;
1298 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
1299 		ipa_res->tx_comp_ring_size;
1300 	/* WBM Tail Pointer Address */
1301 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
1302 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1303 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
1304 		ipa_res->tx_ring_base_paddr;
1305 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
1306 	/* TCL Head Pointer Address */
1307 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
1308 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1309 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
1310 		ipa_res->tx_num_alloc_buffer;
1311 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
1312 
1313 	/* Preprogram TCL descriptor */
1314 	desc_addr =
1315 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
1316 	desc_size = sizeof(struct tcl_data_cmd);
1317 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1318 	tcl_desc_ptr = (struct tcl_data_cmd *)
1319 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
1320 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1321 						HAL_RX_BUF_RBM_SW2_BM;
1322 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1323 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1324 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1325 
1326 	/* RX PIPE */
1327 	/**
1328 	 * Transfer Ring: REO Ring
1329 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1330 	 * Event Ring: FW ring
1331 	 * Event Ring Doorbell PA: FW Head Pointer Address
1332 	 */
1333 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1334 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
1335 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1336 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
1337 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
1338 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
1339 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
1340 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
1341 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
1342 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
1343 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
1344 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1345 						ipa_res->rx_rdy_ring_base_paddr;
1346 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1347 						ipa_res->rx_rdy_ring_size;
1348 	/* REO Tail Pointer Address */
1349 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1350 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1351 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1352 					ipa_res->rx_refill_ring_base_paddr;
1353 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1354 						ipa_res->rx_refill_ring_size;
1355 	/* FW Head Pointer Address */
1356 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1357 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1358 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
1359 						L3_HEADER_PADDING;
1360 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1361 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1362 
1363 	/* Connect WDI IPA PIPE */
1364 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1365 	if (ret) {
1366 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1367 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1368 			  __func__, ret);
1369 		return QDF_STATUS_E_FAILURE;
1370 	}
1371 
1372 	/* IPA uC Doorbell registers */
1373 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1374 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
1375 		  __func__,
1376 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1377 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1378 
1379 	ipa_res->tx_comp_doorbell_paddr =
1380 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1381 	ipa_res->tx_comp_doorbell_vaddr =
1382 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
1383 	ipa_res->rx_ready_doorbell_paddr =
1384 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1385 
1386 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1387 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1388 		  __func__,
1389 		  "transfer_ring_base_pa",
1390 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
1391 		  "transfer_ring_size",
1392 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
1393 		  "transfer_ring_doorbell_pa",
1394 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
1395 		  "event_ring_base_pa",
1396 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
1397 		  "event_ring_size",
1398 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
1399 		  "event_ring_doorbell_pa",
1400 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
1401 		  "num_pkt_buffers",
1402 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
1403 		  "tx_comp_doorbell_paddr",
1404 		  (void *)ipa_res->tx_comp_doorbell_paddr);
1405 
1406 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1407 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1408 		  __func__,
1409 		  "transfer_ring_base_pa",
1410 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1411 		  "transfer_ring_size",
1412 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1413 		  "transfer_ring_doorbell_pa",
1414 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1415 		  "event_ring_base_pa",
1416 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1417 		  "event_ring_size",
1418 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1419 		  "event_ring_doorbell_pa",
1420 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1421 		  "num_pkt_buffers",
1422 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1423 		  "rx_ready_doorbell_paddr",
1424 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1425 
1426 	return QDF_STATUS_SUCCESS;
1427 }
1428 
1429 /**
1430  * dp_ipa_setup_iface() - Setup IPA header and register interface
1431  * @ifname: Interface name
1432  * @mac_addr: Interface MAC address
1433  * @prod_client: IPA prod client type
1434  * @cons_client: IPA cons client type
1435  * @session_id: Session ID
1436  * @is_ipv6_enabled: Is IPV6 enabled or not
1437  *
1438  * Return: QDF_STATUS
1439  */
1440 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1441 			      qdf_ipa_client_type_t prod_client,
1442 			      qdf_ipa_client_type_t cons_client,
1443 			      uint8_t session_id, bool is_ipv6_enabled)
1444 {
1445 	qdf_ipa_wdi_reg_intf_in_params_t in;
1446 	qdf_ipa_wdi_hdr_info_t hdr_info;
1447 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1448 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1449 	int ret = -EINVAL;
1450 
1451 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1452 		  "%s: Add Partial hdr: %s, %pM",
1453 		  __func__, ifname, mac_addr);
1454 
1455 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1456 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1457 
1458 	/* IPV4 header */
1459 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1460 
1461 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1462 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1463 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1464 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1465 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1466 
1467 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1468 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1469 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1470 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1471 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1472 		htonl(session_id << 16);
1473 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1474 
1475 	/* IPV6 header */
1476 	if (is_ipv6_enabled) {
1477 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1478 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1479 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1480 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1481 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1482 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1483 	}
1484 
1485 	ret = qdf_ipa_wdi_reg_intf(&in);
1486 	if (ret) {
1487 		dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
1488 		       ret);
1489 		return QDF_STATUS_E_FAILURE;
1490 	}
1491 
1492 	return QDF_STATUS_SUCCESS;
1493 }
1494 
1495 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1496 
1497 /**
1498  * dp_ipa_cleanup() - Disconnect IPA pipes
1499  * @tx_pipe_handle: Tx pipe handle
1500  * @rx_pipe_handle: Rx pipe handle
1501  *
1502  * Return: QDF_STATUS
1503  */
1504 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1505 {
1506 	int ret;
1507 
1508 	ret = qdf_ipa_wdi_disconn_pipes();
1509 	if (ret) {
1510 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1511 		       ret);
1512 		return QDF_STATUS_E_FAILURE;
1513 	}
1514 
1515 	return QDF_STATUS_SUCCESS;
1516 }
1517 
1518 /**
1519  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1520  * @ifname: Interface name
1521  * @is_ipv6_enabled: Is IPV6 enabled or not
1522  *
1523  * Return: QDF_STATUS
1524  */
1525 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1526 {
1527 	int ret;
1528 
1529 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1530 	if (ret) {
1531 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1532 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
1533 			  __func__, ret);
1534 		return QDF_STATUS_E_FAILURE;
1535 	}
1536 
1537 	return QDF_STATUS_SUCCESS;
1538 }
1539 
1540 /**
1541  * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
1542  * @ppdev: handle to the device instance
1543  *
1544  * Return: QDF_STATUS
1545  */
1546 QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
1547 {
1548 	QDF_STATUS result;
1549 
1550 	result = qdf_ipa_wdi_enable_pipes();
1551 	if (result) {
1552 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1553 			  "%s: Enable WDI PIPE fail, code %d",
1554 			  __func__, result);
1555 		return QDF_STATUS_E_FAILURE;
1556 	}
1557 
1558 	return QDF_STATUS_SUCCESS;
1559 }
1560 
1561 /**
1562  * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
1563  * @ppdev: handle to the device instance
1564  *
1565  * Return: QDF_STATUS
1566  */
1567 QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
1568 {
1569 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1570 	struct dp_soc *soc = pdev->soc;
1571 	QDF_STATUS result;
1572 
1573 	result = qdf_ipa_wdi_disable_pipes();
1574 	if (result)
1575 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1576 			  "%s: Disable WDI PIPE fail, code %d",
1577 			  __func__, result);
1578 
1579 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1580 
1581 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
1582 }
1583 
1584 /**
1585  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1586  * @client: Client type
1587  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1588  *
1589  * Return: QDF_STATUS
1590  */
1591 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1592 {
1593 	qdf_ipa_wdi_perf_profile_t profile;
1594 	QDF_STATUS result;
1595 
1596 	profile.client = client;
1597 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1598 
1599 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1600 	if (result) {
1601 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1602 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
1603 			  __func__, result);
1604 		return QDF_STATUS_E_FAILURE;
1605 	}
1606 
1607 	return QDF_STATUS_SUCCESS;
1608 }
1609 
1610 /**
1611  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
1612  * @pdev: pdev
1613  * @vdev: vdev
1614  * @nbuf: skb
1615  *
1616  * Return: nbuf if TX fails and NULL if TX succeeds
1617  */
1618 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1619 				       struct dp_vdev *vdev,
1620 				       qdf_nbuf_t nbuf)
1621 {
1622 	struct cdp_tid_rx_stats *tid_stats;
1623 	struct dp_peer *vdev_peer;
1624 	uint16_t len;
1625 	uint8_t tid;
1626 
1627 	vdev_peer = vdev->vap_bss_peer;
1628 	if (qdf_unlikely(!vdev_peer))
1629 		return nbuf;
1630 
1631 	tid = qdf_nbuf_get_priority(nbuf);
1632 	tid_stats = &pdev->stats.tid_stats.tid_rx_stats[tid];
1633 
1634 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1635 	len = qdf_nbuf_len(nbuf);
1636 
1637 	if (dp_tx_send(vdev, nbuf)) {
1638 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1639 		tid_stats->fail_cnt[INTRABSS_DROP]++;
1640 		return nbuf;
1641 	}
1642 
1643 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1644 	tid_stats->intrabss_cnt++;
1645 	return NULL;
1646 }
1647 
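/**
 * dp_ipa_rx_intrabss_fwd() - Intra-BSS forward an IPA RX exception frame
 * @pvdev: vdev handle
 * @nbuf: network buffer received from IPA
 * @fwd_success: set to true if the frame was transmitted successfully
 *
 * For broadcast/multicast frames a copy is forwarded and the original is
 * passed up to the stack; unicast frames destined to a peer on the same
 * vdev are forwarded directly.
 *
 * Return: true if the frame was consumed by the driver, false if it should
 *	   be passed up to the network stack
 */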
1648 bool dp_ipa_rx_intrabss_fwd(struct cdp_vdev *pvdev, qdf_nbuf_t nbuf,
1649 			    bool *fwd_success)
1650 {
1651 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
1652 	struct dp_pdev *pdev;
1653 	struct dp_peer *da_peer;
1654 	struct dp_peer *sa_peer;
1655 	qdf_nbuf_t nbuf_copy;
1656 	uint8_t da_is_bcmc;
1657 	struct ethhdr *eh;
1658 	uint8_t local_id;
1659 
1660 	*fwd_success = false; /* set default as failure */
1661 
1662 	/*
1663 	 * WDI 3.0 skb->cb[] info from IPA driver
1664 	 * skb->cb[0] = vdev_id
1665 	 * skb->cb[1].bit#1 = da_is_bcmc
1666 	 */
1667 	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
1668 
1669 	if (qdf_unlikely(!vdev))
1670 		return false;
1671 
1672 	pdev = vdev->pdev;
1673 	if (qdf_unlikely(!pdev))
1674 		return false;
1675 
1676 	/* no fwd for station mode and just pass up to stack */
1677 	if (vdev->opmode == wlan_op_mode_sta)
1678 		return false;
1679 
1680 	if (da_is_bcmc) {
1681 		nbuf_copy = qdf_nbuf_copy(nbuf);
1682 		if (!nbuf_copy)
1683 			return false;
1684 
1685 		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
1686 			qdf_nbuf_free(nbuf_copy);
1687 		else
1688 			*fwd_success = true;
1689 
1690 		/* return false to pass original pkt up to stack */
1691 		return false;
1692 	}
1693 
1694 	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
1695 
1696 	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
1697 		return false;
1698 
1699 	da_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_dest,
1700 				       &local_id);
1701 	if (!da_peer)
1702 		return false;
1703 
1704 	if (da_peer->vdev != vdev)
1705 		return false;
1706 
1707 	sa_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_source,
1708 				       &local_id);
1709 	if (!sa_peer)
1710 		return false;
1711 
1712 	if (sa_peer->vdev != vdev)
1713 		return false;
1714 
1715 	/*
1716 	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
1717 	 * Need to add skb to internal tracking table to avoid nbuf memory
1718 	 * leak check for unallocated skb.
1719 	 */
1720 	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);
1721 
1722 	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
1723 		qdf_nbuf_free(nbuf);
1724 	else
1725 		*fwd_success = true;
1726 
1727 	return true;
1728 }
1729 
1730 #endif
1731