xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1 /*
2  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_rx.h"
32 #include "dp_ipa.h"
33 
34 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
35 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
36 
37 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
38  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
39  * This causes back pressure, resulting in a FW crash.
40  * By leaving some entries with no buffer attached, WBM will be able to write
41  * to the ring, and from dumps we can figure out the buffer which is causing
42  * this issue.
43  */
44 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: qdf log timestamp when this remap was recorded
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
57 
58 #define REO_REMAP_HISTORY_SIZE 32
59 
60 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
61 
62 static qdf_atomic_t dp_ipa_reo_remap_history_index;
/*
 * Return the next slot of the remap history ring and advance the shared
 * atomic write index.
 *
 * qdf_atomic_inc_return() hands each caller a unique value, so exactly one
 * caller sees REO_REMAP_HISTORY_SIZE and rewinds the shared counter; any
 * transient overshoot by concurrent callers is harmless because the returned
 * slot is always reduced modulo REO_REMAP_HISTORY_SIZE.
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
72 
73 /**
74  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
75  * @ix0_val: reo destination ring IX0 value
76  * @ix2_val: reo destination ring IX2 value
77  * @ix3_val: reo destination ring IX3 value
78  *
79  * Return: None
80  */
81 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
82 					 uint32_t ix3_val)
83 {
84 	int idx = dp_ipa_reo_remap_record_index_next(
85 				&dp_ipa_reo_remap_history_index);
86 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
87 
88 	record->timestamp = qdf_get_log_timestamp();
89 	record->ix0_reg = ix0_val;
90 	record->ix2_reg = ix2_val;
91 	record->ix3_reg = ix3_val;
92 }
93 
94 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
95 						   qdf_nbuf_t nbuf,
96 						   bool create)
97 {
98 	qdf_mem_info_t mem_map_table = {0};
99 
100 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
101 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
102 				 skb_end_pointer(nbuf) - nbuf->data);
103 
104 	if (create)
105 		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
106 	else
107 		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
108 
109 	return QDF_STATUS_SUCCESS;
110 }
111 
112 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
113 					     qdf_nbuf_t nbuf,
114 					     bool create)
115 {
116 	struct dp_pdev *pdev;
117 	int i;
118 
119 	for (i = 0; i < soc->pdev_count; i++) {
120 		pdev = soc->pdev_list[i];
121 		if (pdev && pdev->monitor_configured)
122 			return QDF_STATUS_SUCCESS;
123 	}
124 
125 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
126 	    !qdf_mem_smmu_s1_enabled(soc->osdev))
127 		return QDF_STATUS_SUCCESS;
128 
129 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
130 		return QDF_STATUS_SUCCESS;
131 
132 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
133 }
134 
135 #ifdef RX_DESC_MULTI_PAGE_ALLOC
136 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
137 							 struct dp_pdev *pdev,
138 							 bool create)
139 {
140 	struct rx_desc_pool *rx_pool;
141 	uint8_t pdev_id;
142 	uint32_t num_desc, page_id, offset, i;
143 	uint16_t num_desc_per_page;
144 	union dp_rx_desc_list_elem_t *rx_desc_elem;
145 	struct dp_rx_desc *rx_desc;
146 	qdf_nbuf_t nbuf;
147 
148 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
149 		return QDF_STATUS_SUCCESS;
150 
151 	pdev_id = pdev->pdev_id;
152 	rx_pool = &soc->rx_desc_buf[pdev_id];
153 
154 	qdf_spin_lock_bh(&rx_pool->lock);
155 	num_desc = rx_pool->pool_size;
156 	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
157 	for (i = 0; i < num_desc; i++) {
158 		page_id = i / num_desc_per_page;
159 		offset = i % num_desc_per_page;
160 		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
161 			break;
162 		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
163 		rx_desc = &rx_desc_elem->rx_desc;
164 		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
165 			continue;
166 		nbuf = rx_desc->nbuf;
167 
168 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
169 	}
170 	qdf_spin_unlock_bh(&rx_pool->lock);
171 
172 	return QDF_STATUS_SUCCESS;
173 }
174 #else
175 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
176 							 struct dp_pdev *pdev,
177 							 bool create)
178 {
179 	struct rx_desc_pool *rx_pool;
180 	uint8_t pdev_id;
181 	qdf_nbuf_t nbuf;
182 	int i;
183 
184 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
185 		return QDF_STATUS_SUCCESS;
186 
187 	pdev_id = pdev->pdev_id;
188 	rx_pool = &soc->rx_desc_buf[pdev_id];
189 
190 	qdf_spin_lock_bh(&rx_pool->lock);
191 	for (i = 0; i < rx_pool->pool_size; i++) {
192 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
193 		    rx_pool->array[i].rx_desc.unmapped)
194 			continue;
195 
196 		nbuf = rx_pool->array[i].rx_desc.nbuf;
197 
198 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
199 	}
200 	qdf_spin_unlock_bh(&rx_pool->lock);
201 
202 	return QDF_STATUS_SUCCESS;
203 }
204 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
205 
206 /**
207  * dp_tx_ipa_uc_detach - Free autonomy TX resources
208  * @soc: data path instance
209  * @pdev: core txrx pdev context
210  *
211  * Free allocated TX buffers with WBM SRNG
212  *
213  * Return: none
214  */
215 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
216 {
217 	int idx;
218 	qdf_nbuf_t nbuf;
219 	struct dp_ipa_resources *ipa_res;
220 
221 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
222 		nbuf = (qdf_nbuf_t)
223 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
224 		if (!nbuf)
225 			continue;
226 
227 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
228 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, false);
229 
230 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
231 		qdf_nbuf_free(nbuf);
232 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
233 						(void *)NULL;
234 	}
235 
236 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
237 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
238 
239 	ipa_res = &pdev->ipa_resource;
240 	iounmap(ipa_res->tx_comp_doorbell_vaddr);
241 
242 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
243 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
244 }
245 
246 /**
247  * dp_rx_ipa_uc_detach - free autonomy RX resources
248  * @soc: data path instance
249  * @pdev: core txrx pdev context
250  *
251  * This function will detach DP RX into main device context
252  * will free DP Rx resources.
253  *
254  * Return: none
255  */
256 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
257 {
258 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
259 
260 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
261 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
262 }
263 
264 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
265 {
266 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
267 		return QDF_STATUS_SUCCESS;
268 
269 	/* TX resource detach */
270 	dp_tx_ipa_uc_detach(soc, pdev);
271 
272 	/* RX resource detach */
273 	dp_rx_ipa_uc_detach(soc, pdev);
274 
275 	return QDF_STATUS_SUCCESS;	/* success */
276 }
277 
/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffers from non-cacheable memory and post them into the
 * WBM2IPA completion SRNG, simulating the H/W source-ring fill so that
 * IPA can recycle them.
 *
 * Return: QDF_STATUS_SUCCESS (0), -EINVAL on a bad ring size, or
 *         -ENOMEM when the pool or every buffer allocation fails
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	/* extra slack so the buffer start can be aligned to ring_base_align */
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave a few entries bufferless as a WAR for WBM back pressure,
	 * see DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES above.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pool of vaddr pointers mirroring the ring, sized for the full
	 * ring even though only max_alloc_count - 1 entries get buffers.
	 */
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		/* Split the 37-bit DMA address into the lo/hi ring fields */
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
				      HAL_WBM_SW0_BM_ID));

		/* Keep the vaddr so detach can unmap/free this buffer */
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;

		if (qdf_mem_smmu_s1_enabled(soc->osdev))
			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, true);
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
394 
/**
 * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance into the main
 * device (SOC) context.
 *
 * Currently a stub: the RX path needs no dedicated allocation here, so
 * it unconditionally reports success.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
410 
411 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
412 {
413 	int error;
414 
415 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
416 		return QDF_STATUS_SUCCESS;
417 
418 	/* TX resource attach */
419 	error = dp_tx_ipa_uc_attach(soc, pdev);
420 	if (error) {
421 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
422 			  "%s: DP IPA UC TX attach fail code %d",
423 			  __func__, error);
424 		return error;
425 	}
426 
427 	/* RX resource attach */
428 	error = dp_rx_ipa_uc_attach(soc, pdev);
429 	if (error) {
430 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
431 			  "%s: DP IPA UC RX attach fail code %d",
432 			  __func__, error);
433 		dp_tx_ipa_uc_detach(soc, pdev);
434 		return error;
435 	}
436 
437 	return QDF_STATUS_SUCCESS;	/* success */
438 }
439 
/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: physical device handle (supplies the second refill ring)
 *
 * Records base/size and doorbell (HP/TP) physical addresses of the four
 * IPA-owned SRNGs (TCL data, TX completion, REO destination, RX refill)
 * into the soc's ipa_uc_{tx,rx}_rsc blocks, then programs the default
 * REO destination remap for IX0.
 *
 * Return: 0 (QDF_STATUS_SUCCESS)
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* ring size in bytes: entries * entry_size (32-bit words) * 4 */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	/* HP register physical addr = device base paddr + (HP vaddr offset
	 * from the device's virtual base)
	 */
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* destination ring: shadow TP register offset this time */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* IPA RX REFILL ring - pdev's second refill buffer ring */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	return 0;
}
587 
588 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
589 					     qdf_shared_mem_t *shared_mem,
590 					     void *cpu_addr,
591 					     qdf_dma_addr_t dma_addr,
592 					     uint32_t size)
593 {
594 	qdf_dma_addr_t paddr;
595 	int ret;
596 
597 	shared_mem->vaddr = cpu_addr;
598 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
599 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
600 
601 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
602 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
603 
604 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
605 				      shared_mem->vaddr, dma_addr, size);
606 	if (ret) {
607 		dp_err("Unable to get DMA sgtable");
608 		return QDF_STATUS_E_NOMEM;
609 	}
610 
611 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
612 
613 	return QDF_STATUS_SUCCESS;
614 }
615 
616 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
617 {
618 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
619 	struct dp_pdev *pdev =
620 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
621 	struct dp_ipa_resources *ipa_res;
622 
623 	if (!pdev) {
624 		dp_err("%s invalid instance", __func__);
625 		return QDF_STATUS_E_FAILURE;
626 	}
627 
628 	ipa_res = &pdev->ipa_resource;
629 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
630 		return QDF_STATUS_SUCCESS;
631 
632 	ipa_res->tx_num_alloc_buffer =
633 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
634 
635 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
636 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
637 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
638 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
639 
640 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
641 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
642 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
643 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
644 
645 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
646 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
647 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
648 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
649 
650 	dp_ipa_get_shared_mem_info(
651 			soc->osdev, &ipa_res->rx_refill_ring,
652 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
653 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
654 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
655 
656 	if (!qdf_mem_get_dma_addr(soc->osdev,
657 				  &ipa_res->tx_comp_ring.mem_info) ||
658 	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info))
659 		return QDF_STATUS_E_FAILURE;
660 
661 	return QDF_STATUS_SUCCESS;
662 }
663 
664 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
665 {
666 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
667 	struct dp_pdev *pdev =
668 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
669 	struct dp_ipa_resources *ipa_res;
670 	struct hal_srng *wbm_srng = (struct hal_srng *)
671 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
672 	struct hal_srng *reo_srng = (struct hal_srng *)
673 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
674 	uint32_t tx_comp_doorbell_dmaaddr;
675 	uint32_t rx_ready_doorbell_dmaaddr;
676 
677 	if (!pdev) {
678 		dp_err("%s invalid instance", __func__);
679 		return QDF_STATUS_E_FAILURE;
680 	}
681 
682 	ipa_res = &pdev->ipa_resource;
683 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
684 		return QDF_STATUS_SUCCESS;
685 
686 	ipa_res->tx_comp_doorbell_vaddr =
687 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
688 
689 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
690 		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
691 			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
692 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
693 
694 		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
695 			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
696 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
697 	}
698 
699 	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
700 
701 	dp_info("paddr %pK vaddr %pK",
702 		(void *)ipa_res->tx_comp_doorbell_paddr,
703 		(void *)ipa_res->tx_comp_doorbell_vaddr);
704 
705 	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
706 
707 	/*
708 	 * For RX, REO module on Napier/Hastings does reordering on incoming
709 	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
710 	 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell
711 	 * to IPA.
712 	 * Set the doorbell addr for the REO ring.
713 	 */
714 	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
715 	return QDF_STATUS_SUCCESS;
716 }
717 
718 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
719 			      uint8_t *op_msg)
720 {
721 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
722 	struct dp_pdev *pdev =
723 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
724 
725 	if (!pdev) {
726 		dp_err("%s invalid instance", __func__);
727 		return QDF_STATUS_E_FAILURE;
728 	}
729 
730 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
731 		return QDF_STATUS_SUCCESS;
732 
733 	if (pdev->ipa_uc_op_cb) {
734 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
735 	} else {
736 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
737 		    "%s: IPA callback function is not registered", __func__);
738 		qdf_mem_free(op_msg);
739 		return QDF_STATUS_E_FAILURE;
740 	}
741 
742 	return QDF_STATUS_SUCCESS;
743 }
744 
745 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
746 				 ipa_uc_op_cb_type op_cb,
747 				 void *usr_ctxt)
748 {
749 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
750 	struct dp_pdev *pdev =
751 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
752 
753 	if (!pdev) {
754 		dp_err("%s invalid instance", __func__);
755 		return QDF_STATUS_E_FAILURE;
756 	}
757 
758 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
759 		return QDF_STATUS_SUCCESS;
760 
761 	pdev->ipa_uc_op_cb = op_cb;
762 	pdev->usr_ctxt = usr_ctxt;
763 
764 	return QDF_STATUS_SUCCESS;
765 }
766 
/**
 * dp_ipa_get_stat() - Fetch IPA-related datapath statistics
 * @soc_hdl: cdp soc handle
 * @pdev_id: id of the physical device
 *
 * Not implemented yet; unconditionally reports success.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
772 
773 /**
774  * dp_tx_send_ipa_data_frame() - send IPA data frame
775  * @soc_hdl: datapath soc handle
776  * @vdev_id: id of the virtual device
777  * @skb: skb to transmit
778  *
779  * Return: skb/ NULL is for success
780  */
781 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
782 				     qdf_nbuf_t skb)
783 {
784 	qdf_nbuf_t ret;
785 
786 	/* Terminate the (single-element) list of tx frames */
787 	qdf_nbuf_set_next(skb, NULL);
788 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
789 	if (ret) {
790 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
791 			  "%s: Failed to tx", __func__);
792 		return ret;
793 	}
794 
795 	return NULL;
796 }
797 
/**
 * dp_ipa_enable_autonomy() - Route Rx traffic to the REO2IPA ring
 * @soc_hdl: cdp soc handle
 * @pdev_id: id of the physical device
 *
 * Remaps the REO destination control registers so that the SW
 * destinations (and, when Rx hashing is enabled, the hash-based IX2/IX3
 * slots) all point at SW4 (the IPA ring). Each write is recorded in the
 * remap history.
 *
 * Return: QDF_STATUS_SUCCESS; QDF_STATUS_E_FAILURE on an invalid pdev;
 *         QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Register writes below require a responsive target */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* With Rx hashing, IX2 and IX3 get the same all-SW4 value */
		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
848 
/**
 * dp_ipa_disable_autonomy() - Restore the default Rx REO routing
 * @soc_hdl: cdp soc handle
 * @pdev_id: id of the physical device
 *
 * Reprograms IX0 to the default SW1/SW2/SW3 spread and, when Rx hashing
 * is enabled, restores IX2/IX3 from dp_reo_remap_config(). Each write is
 * recorded in the remap history.
 *
 * Return: QDF_STATUS_SUCCESS; QDF_STATUS_E_FAILURE on an invalid pdev;
 *         QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Register writes below require a responsive target */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Fetch the platform's default hash-based IX2/IX3 values */
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
893 
894 /* This should be configurable per H/W configuration enable status */
895 #define L3_HEADER_PADDING	2
896 
897 #ifdef CONFIG_IPA_WDI_UNIFIED_API
898 
899 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
900 static inline void dp_setup_mcc_sys_pipes(
901 		qdf_ipa_sys_connect_params_t *sys_in,
902 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
903 {
904 	/* Setup MCC sys pipe */
905 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
906 			DP_IPA_MAX_IFACE;
907 	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
908 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
909 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
910 }
911 #else
/**
 * dp_setup_mcc_sys_pipes() - MCC sys pipe setup stub
 * @sys_in: unused
 * @pipe_in: WDI connection parameters being built
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2 no MCC sys pipes are needed, so the
 * requested count is simply zeroed.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
918 #endif
919 
/**
 * dp_ipa_wdi_tx_params() - Fill the WDI TX pipe setup info
 * @soc: data path SoC handle
 * @ipa_res: pdev's IPA resource block (rings, buffer count)
 * @tx: WDI pipe setup info to populate
 * @over_gsi: true when the pipe runs over GSI (selects the IPA client)
 *
 * Exports the TX completion ring as the WDI transfer ring and the TCL
 * data ring as the WDI event ring, then preprograms the TCL descriptor
 * template IPA will prepend to every TX packet.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring = WBM TX completion ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring = TCL data ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	/* Descriptor body starts right after the TLV header */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
976 
/**
 * dp_ipa_wdi_rx_params() - Fill WDI pipe setup info for the Rx pipe
 * @soc: data path SoC handle
 * @ipa_res: pre-allocated IPA ring/buffer resources for this pdev
 * @rx: IPA WDI Rx pipe setup info to populate
 * @over_gsi: true if IPA runs over GSI; selects the IPA client enum
 *
 * Transfer ring is the REO destination ring (doorbell is a PCIe address);
 * event ring is the FW Rx refill ring (doorbell is not a PCIe address).
 * Packet payload starts after the Rx TLVs plus L3 alignment padding.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	/* Transfer ring: REO destination ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	/* Event ring: FW Rx refill ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Payload starts after the Rx packet TLVs + L3 alignment pad */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
1016 
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill SMMU WDI pipe setup info for Tx pipe
 * @soc: data path SoC handle
 * @ipa_res: pre-allocated IPA ring/buffer resources for this pdev
 * @tx_smmu: IPA WDI Tx pipe SMMU setup info to populate
 * @over_gsi: true if IPA runs over GSI; selects the IPA client enum
 *
 * SMMU variant of dp_ipa_wdi_tx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses so the IPA driver
 * can map them through its own SMMU context.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring: WBM Tx completion ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	/* Event ring: TCL data ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor: TLV header first, then the command
	 * fields immediately after it.
	 */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
1073 
/**
 * dp_ipa_wdi_rx_smmu_params() - Fill SMMU WDI pipe setup info for Rx pipe
 * @soc: data path SoC handle
 * @ipa_res: pre-allocated IPA ring/buffer resources for this pdev
 * @rx_smmu: IPA WDI Rx pipe SMMU setup info to populate
 * @over_gsi: true if IPA runs over GSI; selects the IPA client enum
 *
 * SMMU variant of dp_ipa_wdi_rx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;

	/* Transfer ring: REO destination ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	/* Event ring: FW Rx refill ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Payload starts after the Rx packet TLVs + L3 alignment pad */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
1113 
1114 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1115 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1116 			void *ipa_wdi_meter_notifier_cb,
1117 			uint32_t ipa_desc_size, void *ipa_priv,
1118 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1119 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1120 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
1121 {
1122 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1123 	struct dp_pdev *pdev =
1124 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1125 	struct dp_ipa_resources *ipa_res;
1126 	qdf_ipa_ep_cfg_t *tx_cfg;
1127 	qdf_ipa_ep_cfg_t *rx_cfg;
1128 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
1129 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
1130 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
1131 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
1132 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1133 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1134 	int ret;
1135 
1136 	if (!pdev) {
1137 		dp_err("%s invalid instance", __func__);
1138 		return QDF_STATUS_E_FAILURE;
1139 	}
1140 
1141 	ipa_res = &pdev->ipa_resource;
1142 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1143 		return QDF_STATUS_SUCCESS;
1144 
1145 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1146 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1147 
1148 	if (is_smmu_enabled)
1149 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
1150 	else
1151 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
1152 
1153 	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);
1154 
1155 	/* TX PIPE */
1156 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1157 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
1158 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
1159 	} else {
1160 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1161 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
1162 	}
1163 
1164 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
1165 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1166 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
1167 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
1168 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
1169 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
1170 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
1171 
1172 	/**
1173 	 * Transfer Ring: WBM Ring
1174 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1175 	 * Event Ring: TCL ring
1176 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1177 	 */
1178 	if (is_smmu_enabled)
1179 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
1180 	else
1181 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
1182 
1183 	/* RX PIPE */
1184 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1185 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
1186 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
1187 	} else {
1188 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1189 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
1190 	}
1191 
1192 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
1193 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1194 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
1195 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
1196 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
1197 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
1198 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
1199 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
1200 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
1201 
1202 	/**
1203 	 * Transfer Ring: REO Ring
1204 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1205 	 * Event Ring: FW ring
1206 	 * Event Ring Doorbell PA: FW Head Pointer Address
1207 	 */
1208 	if (is_smmu_enabled)
1209 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
1210 	else
1211 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
1212 
1213 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1214 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1215 
1216 	/* Connect WDI IPA PIPEs */
1217 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1218 
1219 	if (ret) {
1220 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1221 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1222 			  __func__, ret);
1223 		return QDF_STATUS_E_FAILURE;
1224 	}
1225 
1226 	/* IPA uC Doorbell registers */
1227 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
1228 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1229 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1230 
1231 	ipa_res->tx_comp_doorbell_paddr =
1232 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1233 	ipa_res->rx_ready_doorbell_paddr =
1234 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1235 
1236 	return QDF_STATUS_SUCCESS;
1237 }
1238 
1239 /**
1240  * dp_ipa_setup_iface() - Setup IPA header and register interface
1241  * @ifname: Interface name
1242  * @mac_addr: Interface MAC address
1243  * @prod_client: IPA prod client type
1244  * @cons_client: IPA cons client type
1245  * @session_id: Session ID
1246  * @is_ipv6_enabled: Is IPV6 enabled or not
1247  *
1248  * Return: QDF_STATUS
1249  */
1250 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1251 			      qdf_ipa_client_type_t prod_client,
1252 			      qdf_ipa_client_type_t cons_client,
1253 			      uint8_t session_id, bool is_ipv6_enabled)
1254 {
1255 	qdf_ipa_wdi_reg_intf_in_params_t in;
1256 	qdf_ipa_wdi_hdr_info_t hdr_info;
1257 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1258 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1259 	int ret = -EINVAL;
1260 
1261 	dp_debug("Add Partial hdr: %s, %pM", ifname, mac_addr);
1262 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1263 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1264 
1265 	/* IPV4 header */
1266 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1267 
1268 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1269 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1270 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1271 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1272 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1273 
1274 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1275 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1276 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1277 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
1278 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1279 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1280 		htonl(session_id << 16);
1281 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1282 
1283 	/* IPV6 header */
1284 	if (is_ipv6_enabled) {
1285 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1286 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1287 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1288 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1289 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1290 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1291 	}
1292 
1293 	dp_debug("registering for session_id: %u", session_id);
1294 
1295 	ret = qdf_ipa_wdi_reg_intf(&in);
1296 
1297 	if (ret) {
1298 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1299 		    "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1300 		    __func__, ret);
1301 		return QDF_STATUS_E_FAILURE;
1302 	}
1303 
1304 	return QDF_STATUS_SUCCESS;
1305 }
1306 
1307 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1308 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1309 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1310 			void *ipa_wdi_meter_notifier_cb,
1311 			uint32_t ipa_desc_size, void *ipa_priv,
1312 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1313 			uint32_t *rx_pipe_handle)
1314 {
1315 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1316 	struct dp_pdev *pdev =
1317 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1318 	struct dp_ipa_resources *ipa_res;
1319 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1320 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1321 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1322 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1323 	struct tcl_data_cmd *tcl_desc_ptr;
1324 	uint8_t *desc_addr;
1325 	uint32_t desc_size;
1326 	int ret;
1327 
1328 	if (!pdev) {
1329 		dp_err("%s invalid instance", __func__);
1330 		return QDF_STATUS_E_FAILURE;
1331 	}
1332 
1333 	ipa_res = &pdev->ipa_resource;
1334 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1335 		return QDF_STATUS_SUCCESS;
1336 
1337 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1338 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1339 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1340 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1341 
1342 	/* TX PIPE */
1343 	/**
1344 	 * Transfer Ring: WBM Ring
1345 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1346 	 * Event Ring: TCL ring
1347 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1348 	 */
1349 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1350 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
1351 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1352 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
1353 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
1354 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
1355 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
1356 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
1357 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
1358 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
1359 		ipa_res->tx_comp_ring_base_paddr;
1360 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
1361 		ipa_res->tx_comp_ring_size;
1362 	/* WBM Tail Pointer Address */
1363 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
1364 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1365 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
1366 		ipa_res->tx_ring_base_paddr;
1367 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
1368 	/* TCL Head Pointer Address */
1369 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
1370 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1371 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
1372 		ipa_res->tx_num_alloc_buffer;
1373 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
1374 
1375 	/* Preprogram TCL descriptor */
1376 	desc_addr =
1377 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
1378 	desc_size = sizeof(struct tcl_data_cmd);
1379 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1380 	tcl_desc_ptr = (struct tcl_data_cmd *)
1381 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
1382 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1383 						HAL_RX_BUF_RBM_SW2_BM;
1384 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1385 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1386 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1387 
1388 	/* RX PIPE */
1389 	/**
1390 	 * Transfer Ring: REO Ring
1391 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1392 	 * Event Ring: FW ring
1393 	 * Event Ring Doorbell PA: FW Head Pointer Address
1394 	 */
1395 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1396 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
1397 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1398 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
1399 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
1400 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
1401 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
1402 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
1403 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
1404 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
1405 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
1406 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1407 						ipa_res->rx_rdy_ring_base_paddr;
1408 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1409 						ipa_res->rx_rdy_ring_size;
1410 	/* REO Tail Pointer Address */
1411 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1412 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1413 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1414 					ipa_res->rx_refill_ring_base_paddr;
1415 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1416 						ipa_res->rx_refill_ring_size;
1417 	/* FW Head Pointer Address */
1418 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1419 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1420 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
1421 						L3_HEADER_PADDING;
1422 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1423 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1424 
1425 	/* Connect WDI IPA PIPE */
1426 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1427 	if (ret) {
1428 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1429 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1430 			  __func__, ret);
1431 		return QDF_STATUS_E_FAILURE;
1432 	}
1433 
1434 	/* IPA uC Doorbell registers */
1435 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1436 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
1437 		  __func__,
1438 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1439 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1440 
1441 	ipa_res->tx_comp_doorbell_paddr =
1442 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1443 	ipa_res->tx_comp_doorbell_vaddr =
1444 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
1445 	ipa_res->rx_ready_doorbell_paddr =
1446 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1447 
1448 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1449 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1450 		  __func__,
1451 		  "transfer_ring_base_pa",
1452 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
1453 		  "transfer_ring_size",
1454 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
1455 		  "transfer_ring_doorbell_pa",
1456 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
1457 		  "event_ring_base_pa",
1458 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
1459 		  "event_ring_size",
1460 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
1461 		  "event_ring_doorbell_pa",
1462 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
1463 		  "num_pkt_buffers",
1464 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
1465 		  "tx_comp_doorbell_paddr",
1466 		  (void *)ipa_res->tx_comp_doorbell_paddr);
1467 
1468 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1469 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1470 		  __func__,
1471 		  "transfer_ring_base_pa",
1472 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1473 		  "transfer_ring_size",
1474 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1475 		  "transfer_ring_doorbell_pa",
1476 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1477 		  "event_ring_base_pa",
1478 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1479 		  "event_ring_size",
1480 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1481 		  "event_ring_doorbell_pa",
1482 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1483 		  "num_pkt_buffers",
1484 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1485 		  "tx_comp_doorbell_paddr",
1486 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1487 
1488 	return QDF_STATUS_SUCCESS;
1489 }
1490 
1491 /**
1492  * dp_ipa_setup_iface() - Setup IPA header and register interface
1493  * @ifname: Interface name
1494  * @mac_addr: Interface MAC address
1495  * @prod_client: IPA prod client type
1496  * @cons_client: IPA cons client type
1497  * @session_id: Session ID
1498  * @is_ipv6_enabled: Is IPV6 enabled or not
1499  *
1500  * Return: QDF_STATUS
1501  */
1502 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1503 			      qdf_ipa_client_type_t prod_client,
1504 			      qdf_ipa_client_type_t cons_client,
1505 			      uint8_t session_id, bool is_ipv6_enabled)
1506 {
1507 	qdf_ipa_wdi_reg_intf_in_params_t in;
1508 	qdf_ipa_wdi_hdr_info_t hdr_info;
1509 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1510 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1511 	int ret = -EINVAL;
1512 
1513 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1514 		  "%s: Add Partial hdr: %s, %pM",
1515 		  __func__, ifname, mac_addr);
1516 
1517 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1518 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1519 
1520 	/* IPV4 header */
1521 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1522 
1523 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1524 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1525 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1526 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1527 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1528 
1529 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1530 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1531 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1532 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1533 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1534 		htonl(session_id << 16);
1535 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1536 
1537 	/* IPV6 header */
1538 	if (is_ipv6_enabled) {
1539 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1540 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1541 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1542 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1543 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1544 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1545 	}
1546 
1547 	ret = qdf_ipa_wdi_reg_intf(&in);
1548 	if (ret) {
1549 		dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1550 		       ret);
1551 		return QDF_STATUS_E_FAILURE;
1552 	}
1553 
1554 	return QDF_STATUS_SUCCESS;
1555 }
1556 
1557 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1558 
1559 /**
1560  * dp_ipa_cleanup() - Disconnect IPA pipes
1561  * @tx_pipe_handle: Tx pipe handle
1562  * @rx_pipe_handle: Rx pipe handle
1563  *
1564  * Return: QDF_STATUS
1565  */
1566 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1567 {
1568 	int ret;
1569 
1570 	ret = qdf_ipa_wdi_disconn_pipes();
1571 	if (ret) {
1572 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1573 		       ret);
1574 		return QDF_STATUS_E_FAILURE;
1575 	}
1576 
1577 	return QDF_STATUS_SUCCESS;
1578 }
1579 
1580 /**
1581  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1582  * @ifname: Interface name
1583  * @is_ipv6_enabled: Is IPV6 enabled or not
1584  *
1585  * Return: QDF_STATUS
1586  */
1587 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1588 {
1589 	int ret;
1590 
1591 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1592 	if (ret) {
1593 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1594 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
1595 			  __func__, ret);
1596 		return QDF_STATUS_E_FAILURE;
1597 	}
1598 
1599 	return QDF_STATUS_SUCCESS;
1600 }
1601 
1602 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1603 {
1604 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1605 	struct dp_pdev *pdev =
1606 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1607 	QDF_STATUS result;
1608 
1609 	if (!pdev) {
1610 		dp_err("%s invalid instance", __func__);
1611 		return QDF_STATUS_E_FAILURE;
1612 	}
1613 
1614 	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
1615 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
1616 
1617 	result = qdf_ipa_wdi_enable_pipes();
1618 	if (result) {
1619 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1620 			  "%s: Enable WDI PIPE fail, code %d",
1621 			  __func__, result);
1622 		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1623 		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1624 		return QDF_STATUS_E_FAILURE;
1625 	}
1626 
1627 	return QDF_STATUS_SUCCESS;
1628 }
1629 
1630 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1631 {
1632 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1633 	struct dp_pdev *pdev =
1634 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1635 	QDF_STATUS result;
1636 
1637 	if (!pdev) {
1638 		dp_err("%s invalid instance", __func__);
1639 		return QDF_STATUS_E_FAILURE;
1640 	}
1641 
1642 	result = qdf_ipa_wdi_disable_pipes();
1643 	if (result)
1644 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1645 			  "%s: Disable WDI PIPE fail, code %d",
1646 			  __func__, result);
1647 
1648 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1649 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1650 
1651 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
1652 }
1653 
1654 /**
1655  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1656  * @client: Client type
1657  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1658  *
1659  * Return: QDF_STATUS
1660  */
1661 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1662 {
1663 	qdf_ipa_wdi_perf_profile_t profile;
1664 	QDF_STATUS result;
1665 
1666 	profile.client = client;
1667 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1668 
1669 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1670 	if (result) {
1671 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1672 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
1673 			  __func__, result);
1674 		return QDF_STATUS_E_FAILURE;
1675 	}
1676 
1677 	return QDF_STATUS_SUCCESS;
1678 }
1679 
1680 /**
1681  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
1682  * @pdev: pdev
1683  * @vdev: vdev
1684  * @nbuf: skb
1685  *
1686  * Return: nbuf if TX fails and NULL if TX succeeds
1687  */
1688 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1689 				       struct dp_vdev *vdev,
1690 				       qdf_nbuf_t nbuf)
1691 {
1692 	struct dp_peer *vdev_peer;
1693 	uint16_t len;
1694 
1695 	vdev_peer = vdev->vap_bss_peer;
1696 	if (qdf_unlikely(!vdev_peer))
1697 		return nbuf;
1698 
1699 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1700 	len = qdf_nbuf_len(nbuf);
1701 
1702 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
1703 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1704 		return nbuf;
1705 	}
1706 
1707 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1708 	return NULL;
1709 }
1710 
/**
 * dp_ipa_rx_intrabss_fwd() - Forward IPA Rx frames within the BSS
 * @soc_hdl: data path SoC handle
 * @vdev_id: vdev id from the IPA exception path
 * @nbuf: Rx skb handed over by the IPA driver
 * @fwd_success: out flag, set true when the intra-bss Tx succeeded
 *
 * Return: true when @nbuf has been consumed by this function (caller must
 * not touch it), false when the caller should pass @nbuf up to the stack.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		return false;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		return false;

	/* Broadcast/multicast: Tx a copy within the BSS, but always pass
	 * the original frame up to the stack as well.
	 */
	if (da_is_bcmc) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		return false;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frame addressed to this vdev itself: let the stack handle it */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		return false;

	/* Unicast intra-bss forward only when both DA and SA peers exist
	 * and belong to this same vdev.
	 */
	da_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_dest);
	if (!da_peer)
		return false;

	if (da_peer->vdev != vdev)
		return false;

	sa_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_source);
	if (!sa_peer)
		return false;

	if (sa_peer->vdev != vdev)
		return false;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	return true;
}
1791 
#ifdef MDM_PLATFORM
/**
 * dp_ipa_is_mdm_platform() - Report whether this build targets an MDM
 * platform (compile-time MDM_PLATFORM flag)
 *
 * Return: true on MDM builds, false otherwise
 */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
1803 
1804 /**
1805  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
1806  * @soc: soc
1807  * @nbuf: skb
1808  *
1809  * Return: nbuf if success and otherwise NULL
1810  */
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_pkt_tlvs;

	/* IPA offload disabled in config: nothing to adjust */
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return nbuf;

	/* WLAN IPA is run-time disabled */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
		return nbuf;

	/* Linearize the skb since IPA assumes linear buffer */
	/* NOTE(review): qdf_likely() hints fragmented skbs are the common
	 * case on this path - confirm; qdf_unlikely() may be intended.
	 */
	if (qdf_likely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf)) {
			dp_err_rl("nbuf linearize failed");
			return NULL;
		}
	}

	/* Stash the Rx packet TLVs in a temporary buffer: push_head below
	 * shifts the data pointer over them, so copying them back directly
	 * would read from an overlapping region.
	 */
	rx_pkt_tlvs = qdf_mem_malloc(RX_PKT_TLVS_LEN);
	if (!rx_pkt_tlvs) {
		dp_err_rl("rx_pkt_tlvs alloc failed");
		return NULL;
	}

	qdf_mem_copy(rx_pkt_tlvs, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);

	/* Pad L3_HEADER_PADDING before ethhdr and after rx_pkt_tlvs */
	qdf_nbuf_push_head(nbuf, L3_HEADER_PADDING);

	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_pkt_tlvs, RX_PKT_TLVS_LEN);

	/* L3_HEADER_PADDING is not accounted for in the real skb length */
	qdf_nbuf_set_len(nbuf, qdf_nbuf_len(nbuf) - L3_HEADER_PADDING);

	qdf_mem_free(rx_pkt_tlvs);

	return nbuf;
}
1850 
1851 #endif
1852