xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision abdb33bb009aab537f78f07c738e48e6661fd0e0)
1 /*
2  * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_rx.h"
32 #include "dp_ipa.h"
33 
34 /* Hard coded config parameters until dp_ops_cfg.cfg_attach is implemented */
35 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
36 
37 /* WAR for IPA_OFFLOAD case. In some cases, it is observed that WBM tries to
38  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
39  * This causes back pressure, resulting in a FW crash.
40  * By leaving some entries with no buffer attached, WBM will be able to write
41  * to the ring, and from dumps we can figure out the buffer which is causing
42  * this issue.
43  */
44 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
45 /**
46  * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: qdf log timestamp when the record was updated
47  * @ix0_reg: reo destination ring IX0 value
48  * @ix2_reg: reo destination ring IX2 value
49  * @ix3_reg: reo destination ring IX3 value
50  */
51 struct dp_ipa_reo_remap_record {
52 	uint64_t timestamp;
53 	uint32_t ix0_reg;
54 	uint32_t ix2_reg;
55 	uint32_t ix3_reg;
56 };
57 
58 #define REO_REMAP_HISTORY_SIZE 32
59 
60 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
61 
62 static qdf_atomic_t dp_ipa_reo_remap_history_index;
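/*
 * dp_ipa_reo_remap_record_index_next() - advance the circular history index
 * atomically and wrap it back once it reaches REO_REMAP_HISTORY_SIZE, so
 * dp_ipa_reo_remap_history behaves as a ring buffer.
 */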
63 static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
64 {
65 	int next = qdf_atomic_inc_return(index);
66 
67 	if (next == REO_REMAP_HISTORY_SIZE)
68 		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);
69 
70 	return next % REO_REMAP_HISTORY_SIZE;
71 }
72 
73 /**
74  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
75  * @ix0_val: reo destination ring IX0 value
76  * @ix2_val: reo destination ring IX2 value
77  * @ix3_val: reo destination ring IX3 value
78  *
79  * Return: None
80  */
81 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
82 					 uint32_t ix3_val)
83 {
84 	int idx = dp_ipa_reo_remap_record_index_next(
85 				&dp_ipa_reo_remap_history_index);
86 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
87 
88 	record->timestamp = qdf_get_log_timestamp();
89 	record->ix0_reg = ix0_val;
90 	record->ix2_reg = ix2_val;
91 	record->ix3_reg = ix3_val;
92 }
93 
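/**
 * __dp_ipa_handle_buf_smmu_mapping() - Create/release IPA SMMU mapping
 *	for a single nbuf
 * @soc: data path instance
 * @nbuf: network buffer to map or unmap
 * @create: true to create the SMMU mapping, false to release it
 *
 * Return: QDF_STATUS_SUCCESS
 */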
94 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
95 						   qdf_nbuf_t nbuf,
96 						   bool create)
97 {
98 	qdf_mem_info_t mem_map_table = {0};
99 
100 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
101 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
102 				 skb_end_pointer(nbuf) - nbuf->data);
103 
104 	if (create)
105 		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
106 	else
107 		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
108 
109 	return QDF_STATUS_SUCCESS;
110 }
111 
112 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
113 					     qdf_nbuf_t nbuf,
114 					     bool create)
115 {
116 	struct dp_pdev *pdev;
117 	int i;
118 
119 	for (i = 0; i < soc->pdev_count; i++) {
120 		pdev = soc->pdev_list[i];
121 		if (pdev && pdev->monitor_configured)
122 			return QDF_STATUS_SUCCESS;
123 	}
124 
125 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
126 	    !qdf_mem_smmu_s1_enabled(soc->osdev))
127 		return QDF_STATUS_SUCCESS;
128 
129 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
130 		return QDF_STATUS_SUCCESS;
131 
132 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
133 }
134 
135 #ifdef RX_DESC_MULTI_PAGE_ALLOC
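/*
 * Multi-page Rx descriptor pool variant: walk every descriptor page by
 * page and create/release the IPA SMMU mapping for each in-use, still
 * mapped Rx buffer.
 */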
136 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
137 							 struct dp_pdev *pdev,
138 							 bool create)
139 {
140 	struct rx_desc_pool *rx_pool;
141 	uint8_t pdev_id;
142 	uint32_t num_desc, page_id, offset, i;
143 	uint16_t num_desc_per_page;
144 	union dp_rx_desc_list_elem_t *rx_desc_elem;
145 	struct dp_rx_desc *rx_desc;
146 	qdf_nbuf_t nbuf;
147 
148 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
149 		return QDF_STATUS_SUCCESS;
150 
151 	pdev_id = pdev->pdev_id;
152 	rx_pool = &soc->rx_desc_buf[pdev_id];
153 
154 	qdf_spin_lock_bh(&rx_pool->lock);
155 	num_desc = rx_pool->pool_size;
156 	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
157 	for (i = 0; i < num_desc; i++) {
158 		page_id = i / num_desc_per_page;
159 		offset = i % num_desc_per_page;
160 		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
161 			break;
162 		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
163 		rx_desc = &rx_desc_elem->rx_desc;
164 		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
165 			continue;
166 		nbuf = rx_desc->nbuf;
167 
168 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
169 	}
170 	qdf_spin_unlock_bh(&rx_pool->lock);
171 
172 	return QDF_STATUS_SUCCESS;
173 }
174 #else
175 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
176 							 struct dp_pdev *pdev,
177 							 bool create)
178 {
179 	struct rx_desc_pool *rx_pool;
180 	uint8_t pdev_id;
181 	qdf_nbuf_t nbuf;
182 	int i;
183 
184 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
185 		return QDF_STATUS_SUCCESS;
186 
187 	pdev_id = pdev->pdev_id;
188 	rx_pool = &soc->rx_desc_buf[pdev_id];
189 
190 	qdf_spin_lock_bh(&rx_pool->lock);
191 	for (i = 0; i < rx_pool->pool_size; i++) {
192 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
193 		    rx_pool->array[i].rx_desc.unmapped)
194 			continue;
195 
196 		nbuf = rx_pool->array[i].rx_desc.nbuf;
197 
198 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
199 	}
200 	qdf_spin_unlock_bh(&rx_pool->lock);
201 
202 	return QDF_STATUS_SUCCESS;
203 }
204 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
205 
206 /**
207  * dp_tx_ipa_uc_detach - Free autonomy TX resources
208  * @soc: data path instance
209  * @pdev: core txrx pdev context
210  *
211  * Free allocated TX buffers with WBM SRNG
212  *
213  * Return: none
214  */
215 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
216 {
217 	int idx;
218 	qdf_nbuf_t nbuf;
219 	struct dp_ipa_resources *ipa_res;
220 
221 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
222 		nbuf = (qdf_nbuf_t)
223 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
224 		if (!nbuf)
225 			continue;
226 
227 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
228 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, false);
229 
230 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
231 		qdf_nbuf_free(nbuf);
232 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
233 						(void *)NULL;
234 	}
235 
236 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
237 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
238 
239 	ipa_res = &pdev->ipa_resource;
240 	iounmap(ipa_res->tx_comp_doorbell_vaddr);
241 
242 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
243 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
244 }
245 
246 /**
247  * dp_rx_ipa_uc_detach - free autonomy RX resources
248  * @soc: data path instance
249  * @pdev: core txrx pdev context
250  *
251  * This function will detach DP RX from the main device context
252  * and free DP RX resources.
253  *
254  * Return: none
255  */
256 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
257 {
258 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
259 
260 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
261 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
262 }
263 
264 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
265 {
266 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
267 		return QDF_STATUS_SUCCESS;
268 
269 	/* TX resource detach */
270 	dp_tx_ipa_uc_detach(soc, pdev);
271 
272 	/* RX resource detach */
273 	dp_rx_ipa_uc_detach(soc, pdev);
274 
275 	return QDF_STATUS_SUCCESS;	/* success */
276 }
277 
278 /**
279  * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
280  * @soc: data path instance
281  * @pdev: Physical device handle
282  *
283  * Allocate TX buffer from non-cacheable memory
284  * Attach the allocated TX buffers to the WBM SRNG
285  *
286  * Return: 0 (QDF_STATUS_SUCCESS) on success, negative errno on failure
287  */
288 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
289 {
290 	uint32_t tx_buffer_count;
291 	uint32_t ring_base_align = 8;
292 	qdf_dma_addr_t buffer_paddr;
293 	struct hal_srng *wbm_srng = (struct hal_srng *)
294 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
295 	struct hal_srng_params srng_params;
296 	uint32_t paddr_lo;
297 	uint32_t paddr_hi;
298 	void *ring_entry;
299 	int num_entries;
300 	qdf_nbuf_t nbuf;
301 	int retval = QDF_STATUS_SUCCESS;
302 	int max_alloc_count = 0;
303 
304 	/*
305 	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
306 	 * unsigned int uc_tx_buf_sz =
307 	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
308 	 */
309 	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
310 	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
311 
312 	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
313 			    &srng_params);
314 	num_entries = srng_params.num_entries;
315 
316 	max_alloc_count =
317 		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
318 	if (max_alloc_count <= 0) {
319 		dp_err("incorrect value for buffer count %d", max_alloc_count);
320 		return -EINVAL;
321 	}
322 
323 	dp_info("requested %d buffers to be posted to wbm ring",
324 		max_alloc_count);
325 
326 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
327 		qdf_mem_malloc(num_entries *
328 		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
329 	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
330 		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
331 		return -ENOMEM;
332 	}
333 
334 	hal_srng_access_start_unlocked(soc->hal_soc,
335 				       hal_srng_to_hal_ring_handle(wbm_srng));
336 
337 	/*
338 	 * Allocate as many Tx buffers as possible.
339 	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES entries empty.
340 	 * Populate Tx buffers into the WBM2IPA ring.
341 	 * This initial buffer population simulates H/W filling the source
342 	 * ring and updates the HP accordingly.
343 	 */
344 	for (tx_buffer_count = 0;
345 		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
346 		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
347 		if (!nbuf)
348 			break;
349 
350 		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
351 				hal_srng_to_hal_ring_handle(wbm_srng));
352 		if (!ring_entry) {
353 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
354 				  "%s: Failed to get WBM ring entry",
355 				  __func__);
356 			qdf_nbuf_free(nbuf);
357 			break;
358 		}
359 
360 		qdf_nbuf_map_single(soc->osdev, nbuf,
361 				    QDF_DMA_BIDIRECTIONAL);
362 		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
363 
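		/*
		 * Split the 37-bit DMA address for the ring entry: low 32
		 * bits and the upper bits selected by mask 0x1f (bits 32-36).
		 */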
364 		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
365 		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
366 		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
367 		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
368 		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
369 				      HAL_WBM_SW0_BM_ID));
370 
371 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
372 			= (void *)nbuf;
373 
374 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
375 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, true);
376 	}
377 
378 	hal_srng_access_end_unlocked(soc->hal_soc,
379 				     hal_srng_to_hal_ring_handle(wbm_srng));
380 
381 	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
382 
383 	if (tx_buffer_count) {
384 		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
385 	} else {
386 		dp_err("No IPA WDI TX buffer allocated!");
387 		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
388 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
389 		retval = -ENOMEM;
390 	}
391 
392 	return retval;
393 }
394 
395 /**
396  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
397  * @soc: data path instance
398  * @pdev: core txrx pdev context
399  *
400  * This function will attach a DP RX instance to the main
401  * device (SOC) context.
402  *
403  * Return: QDF_STATUS_SUCCESS: success
404  *         QDF_STATUS_E_RESOURCES: Error return
405  */
406 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
407 {
408 	return QDF_STATUS_SUCCESS;
409 }
410 
411 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
412 {
413 	int error;
414 
415 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
416 		return QDF_STATUS_SUCCESS;
417 
418 	/* TX resource attach */
419 	error = dp_tx_ipa_uc_attach(soc, pdev);
420 	if (error) {
421 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
422 			  "%s: DP IPA UC TX attach fail code %d",
423 			  __func__, error);
424 		return error;
425 	}
426 
427 	/* RX resource attach */
428 	error = dp_rx_ipa_uc_attach(soc, pdev);
429 	if (error) {
430 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
431 			  "%s: DP IPA UC RX attach fail code %d",
432 			  __func__, error);
433 		dp_tx_ipa_uc_detach(soc, pdev);
434 		return error;
435 	}
436 
437 	return QDF_STATUS_SUCCESS;	/* success */
438 }
439 
440 /*
441  * dp_ipa_ring_resource_setup() - setup IPA ring resources
442  * @soc: data path SoC handle
 * @pdev: physical device handle
443  *
444  * Return: 0 on success
445  */
446 int dp_ipa_ring_resource_setup(struct dp_soc *soc,
447 		struct dp_pdev *pdev)
448 {
449 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
450 	struct hal_srng *hal_srng;
451 	struct hal_srng_params srng_params;
452 	qdf_dma_addr_t hp_addr;
453 	unsigned long addr_offset, dev_base_paddr;
454 	uint32_t ix0;
455 
456 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
457 		return QDF_STATUS_SUCCESS;
458 
459 	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
460 	hal_srng = (struct hal_srng *)
461 			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
462 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
463 			    hal_srng_to_hal_ring_handle(hal_srng),
464 			    &srng_params);
465 
466 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
467 		srng_params.ring_base_paddr;
468 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
469 		srng_params.ring_base_vaddr;
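	/* num_entries * entry_size is in 4-byte words; << 2 converts to bytes */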
470 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
471 		(srng_params.num_entries * srng_params.entry_size) << 2;
472 	/*
473 	 * For the register backed memory addresses, use the scn->mem_pa to
474 	 * calculate the physical address of the shadow registers
475 	 */
476 	dev_base_paddr =
477 		(unsigned long)
478 		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
479 	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
480 		      (unsigned long)(hal_soc->dev_base_addr);
481 	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
482 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
483 
484 	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
485 		(unsigned int)addr_offset,
486 		(unsigned int)dev_base_paddr,
487 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
488 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
489 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
490 		srng_params.num_entries,
491 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
492 
493 	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
494 	hal_srng = (struct hal_srng *)
495 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
496 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
497 			    hal_srng_to_hal_ring_handle(hal_srng),
498 			    &srng_params);
499 
500 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
501 						srng_params.ring_base_paddr;
502 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
503 						srng_params.ring_base_vaddr;
504 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
505 		(srng_params.num_entries * srng_params.entry_size) << 2;
506 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
507 		      (unsigned long)(hal_soc->dev_base_addr);
508 	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
509 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
510 
511 	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
512 		(unsigned int)addr_offset,
513 		(unsigned int)dev_base_paddr,
514 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
515 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
516 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
517 		srng_params.num_entries,
518 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
519 
520 	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
521 	hal_srng = (struct hal_srng *)
522 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
523 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
524 			    hal_srng_to_hal_ring_handle(hal_srng),
525 			    &srng_params);
526 
527 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
528 						srng_params.ring_base_paddr;
529 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
530 						srng_params.ring_base_vaddr;
531 	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
532 		(srng_params.num_entries * srng_params.entry_size) << 2;
533 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
534 		      (unsigned long)(hal_soc->dev_base_addr);
535 	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
536 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
537 
538 	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
539 		(unsigned int)addr_offset,
540 		(unsigned int)dev_base_paddr,
541 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
542 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
543 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
544 		srng_params.num_entries,
545 		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
546 
547 	hal_srng = (struct hal_srng *)
548 			pdev->rx_refill_buf_ring2.hal_srng;
549 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
550 			    hal_srng_to_hal_ring_handle(hal_srng),
551 			    &srng_params);
552 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
553 		srng_params.ring_base_paddr;
554 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
555 		srng_params.ring_base_vaddr;
556 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
557 		(srng_params.num_entries * srng_params.entry_size) << 2;
558 	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
559 				       hal_srng_to_hal_ring_handle(hal_srng));
560 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
561 		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);
562 
563 	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
564 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
565 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
566 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
567 		srng_params.num_entries,
568 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
569 
570 	/*
571 	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
572 	 * DESTINATION_RING_CTRL_IX_0.
573 	 */
574 	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
575 	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
576 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
577 	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
578 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
579 	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
580 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
581 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);
582 
583 	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);
584 
585 	return 0;
586 }
587 
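/**
 * dp_ipa_get_shared_mem_info() - Fill a qdf_shared_mem_t describing an SRNG
 * @osdev: OS device handle
 * @shared_mem: shared memory descriptor to populate
 * @cpu_addr: ring base virtual address
 * @dma_addr: ring base DMA address
 * @size: ring size in bytes
 *
 * Records the virtual, DMA and physical addresses of the ring and builds
 * an sgtable for it, so the ring can be handed to IPA in both SMMU and
 * non-SMMU configurations.
 *
 * Return: QDF_STATUS
 */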
588 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
589 					     qdf_shared_mem_t *shared_mem,
590 					     void *cpu_addr,
591 					     qdf_dma_addr_t dma_addr,
592 					     uint32_t size)
593 {
594 	qdf_dma_addr_t paddr;
595 	int ret;
596 
597 	shared_mem->vaddr = cpu_addr;
598 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
599 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
600 
601 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
602 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
603 
604 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
605 				      shared_mem->vaddr, dma_addr, size);
606 	if (ret) {
607 		dp_err("Unable to get DMA sgtable");
608 		return QDF_STATUS_E_NOMEM;
609 	}
610 
611 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
612 
613 	return QDF_STATUS_SUCCESS;
614 }
615 
616 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
617 {
618 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
619 	struct dp_pdev *pdev =
620 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
621 	struct dp_ipa_resources *ipa_res;
622 
623 	if (!pdev) {
624 		dp_err("%s invalid instance", __func__);
625 		return QDF_STATUS_E_FAILURE;
626 	}
627 
628 	ipa_res = &pdev->ipa_resource;
629 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
630 		return QDF_STATUS_SUCCESS;
631 
632 	ipa_res->tx_num_alloc_buffer =
633 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
634 
635 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
636 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
637 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
638 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
639 
640 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
641 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
642 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
643 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
644 
645 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
646 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
647 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
648 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
649 
650 	dp_ipa_get_shared_mem_info(
651 			soc->osdev, &ipa_res->rx_refill_ring,
652 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
653 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
654 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
655 
656 	if (!qdf_mem_get_dma_addr(soc->osdev,
657 				  &ipa_res->tx_comp_ring.mem_info) ||
658 	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info))
659 		return QDF_STATUS_E_FAILURE;
660 
661 	return QDF_STATUS_SUCCESS;
662 }
663 
664 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
665 {
666 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
667 	struct dp_pdev *pdev =
668 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
669 	struct dp_ipa_resources *ipa_res;
670 	struct hal_srng *wbm_srng = (struct hal_srng *)
671 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
672 	struct hal_srng *reo_srng = (struct hal_srng *)
673 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
674 	uint32_t tx_comp_doorbell_dmaaddr;
675 	uint32_t rx_ready_doorbell_dmaaddr;
676 
677 	if (!pdev) {
678 		dp_err("%s invalid instance", __func__);
679 		return QDF_STATUS_E_FAILURE;
680 	}
681 
682 	ipa_res = &pdev->ipa_resource;
683 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
684 		return QDF_STATUS_SUCCESS;
685 
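	/*
	 * Map 4 bytes of the IPA uC Tx completion doorbell register into
	 * kernel virtual address space.
	 */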
686 	ipa_res->tx_comp_doorbell_vaddr =
687 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
688 
689 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
690 		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
691 			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
692 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
693 
694 		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
695 			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
696 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
697 	}
698 
699 	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
700 
701 	dp_info("paddr %pK vaddr %pK",
702 		(void *)ipa_res->tx_comp_doorbell_paddr,
703 		(void *)ipa_res->tx_comp_doorbell_vaddr);
704 
705 	/*
706 	 * For RX, REO module on Napier/Hastings does reordering on incoming
707 	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
708 	 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell
709 	 * to IPA.
710 	 * Set the doorbell addr for the REO ring.
711 	 */
712 	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
713 	return QDF_STATUS_SUCCESS;
714 }
715 
716 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
717 			      uint8_t *op_msg)
718 {
719 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
720 	struct dp_pdev *pdev =
721 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
722 
723 	if (!pdev) {
724 		dp_err("%s invalid instance", __func__);
725 		return QDF_STATUS_E_FAILURE;
726 	}
727 
728 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
729 		return QDF_STATUS_SUCCESS;
730 
731 	if (pdev->ipa_uc_op_cb) {
732 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
733 	} else {
734 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
735 		    "%s: IPA callback function is not registered", __func__);
736 		qdf_mem_free(op_msg);
737 		return QDF_STATUS_E_FAILURE;
738 	}
739 
740 	return QDF_STATUS_SUCCESS;
741 }
742 
743 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
744 				 ipa_uc_op_cb_type op_cb,
745 				 void *usr_ctxt)
746 {
747 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
748 	struct dp_pdev *pdev =
749 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
750 
751 	if (!pdev) {
752 		dp_err("%s invalid instance", __func__);
753 		return QDF_STATUS_E_FAILURE;
754 	}
755 
756 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
757 		return QDF_STATUS_SUCCESS;
758 
759 	pdev->ipa_uc_op_cb = op_cb;
760 	pdev->usr_ctxt = usr_ctxt;
761 
762 	return QDF_STATUS_SUCCESS;
763 }
764 
765 QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
766 {
767 	/* TBD */
768 	return QDF_STATUS_SUCCESS;
769 }
770 
771 /**
772  * dp_tx_send_ipa_data_frame() - send IPA data frame
773  * @soc_hdl: datapath soc handle
774  * @vdev_id: id of the virtual device
775  * @skb: skb to transmit
776  *
777  * Return: NULL on success, skb on failure
778  */
779 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
780 				     qdf_nbuf_t skb)
781 {
782 	qdf_nbuf_t ret;
783 
784 	/* Terminate the (single-element) list of tx frames */
785 	qdf_nbuf_set_next(skb, NULL);
786 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
787 	if (ret) {
788 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
789 			  "%s: Failed to tx", __func__);
790 		return ret;
791 	}
792 
793 	return NULL;
794 }
795 
796 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
797 {
798 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
799 	struct dp_pdev *pdev =
800 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
801 	uint32_t ix0;
802 	uint32_t ix2;
803 
804 	if (!pdev) {
805 		dp_err("%s invalid instance", __func__);
806 		return QDF_STATUS_E_FAILURE;
807 	}
808 
809 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
810 		return QDF_STATUS_SUCCESS;
811 
812 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
813 		return QDF_STATUS_E_AGAIN;
814 
815 	/* Call HAL API to remap REO rings to REO2IPA ring */
816 	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
817 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
818 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 2) |
819 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
820 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
821 	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
822 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
823 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);
824 
825 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
826 		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
827 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
828 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
829 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
830 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
831 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
832 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
833 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
834 
835 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
836 					   &ix2, &ix2);
837 		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
838 	} else {
839 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
840 					   NULL, NULL);
841 		dp_ipa_reo_remap_history_add(ix0, 0, 0);
842 	}
843 
844 	return QDF_STATUS_SUCCESS;
845 }
846 
847 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
848 {
849 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
850 	struct dp_pdev *pdev =
851 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
852 	uint32_t ix0;
853 	uint32_t ix2;
854 	uint32_t ix3;
855 
856 	if (!pdev) {
857 		dp_err("%s invalid instance", __func__);
858 		return QDF_STATUS_E_FAILURE;
859 	}
860 
861 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
862 		return QDF_STATUS_SUCCESS;
863 
864 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
865 		return QDF_STATUS_E_AGAIN;
866 
867 	/* Call HAL API to remap REO rings to REO2IPA ring */
868 	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
869 	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
870 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
871 	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
872 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
873 	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
874 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
875 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);
876 
877 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
878 		dp_reo_remap_config(soc, &ix2, &ix3);
879 
880 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
881 					   &ix2, &ix3);
882 		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
883 	} else {
884 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
885 					   NULL, NULL);
886 		dp_ipa_reo_remap_history_add(ix0, 0, 0);
887 	}
888 
889 	return QDF_STATUS_SUCCESS;
890 }
891 
892 /* This should be made configurable based on the H/W configuration */
893 #define L3_HEADER_PADDING	2
894 
895 #ifdef CONFIG_IPA_WDI_UNIFIED_API
896 
897 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
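/*
 * Without TX flow control v2, request one IPA sys pipe per interface
 * (DP_IPA_MAX_IFACE) and copy in the caller-provided sys pipe parameters.
 */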
898 static inline void dp_setup_mcc_sys_pipes(
899 		qdf_ipa_sys_connect_params_t *sys_in,
900 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
901 {
902 	/* Setup MCC sys pipe */
903 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
904 			DP_IPA_MAX_IFACE;
905 	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
906 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
907 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
908 }
909 #else
910 static inline void dp_setup_mcc_sys_pipes(
911 		qdf_ipa_sys_connect_params_t *sys_in,
912 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
913 {
914 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
915 }
916 #endif
917 
918 static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
919 				 struct dp_ipa_resources *ipa_res,
920 				 qdf_ipa_wdi_pipe_setup_info_t *tx,
921 				 bool over_gsi)
922 {
923 	struct tcl_data_cmd *tcl_desc_ptr;
924 	uint8_t *desc_addr;
925 	uint32_t desc_size;
926 
927 	if (over_gsi)
928 		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
929 	else
930 		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
931 
932 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
933 		qdf_mem_get_dma_addr(soc->osdev,
934 				     &ipa_res->tx_comp_ring.mem_info);
935 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
936 		qdf_mem_get_dma_size(soc->osdev,
937 				     &ipa_res->tx_comp_ring.mem_info);
938 
939 	/* WBM Tail Pointer Address */
940 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
941 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
942 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;
943 
944 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
945 		qdf_mem_get_dma_addr(soc->osdev,
946 				     &ipa_res->tx_ring.mem_info);
947 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
948 		qdf_mem_get_dma_size(soc->osdev,
949 				     &ipa_res->tx_ring.mem_info);
950 
951 	/* TCL Head Pointer Address */
952 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
953 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
954 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;
955 
956 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
957 		ipa_res->tx_num_alloc_buffer;
958 
959 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
960 
961 	/* Preprogram TCL descriptor */
962 	desc_addr =
963 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
964 	desc_size = sizeof(struct tcl_data_cmd);
965 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
966 	tcl_desc_ptr = (struct tcl_data_cmd *)
967 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
968 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
969 		HAL_RX_BUF_RBM_SW2_BM;
970 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
971 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
972 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
973 }
974 
975 static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
976 				 struct dp_ipa_resources *ipa_res,
977 				 qdf_ipa_wdi_pipe_setup_info_t *rx,
978 				 bool over_gsi)
979 {
980 	if (over_gsi)
981 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
982 					IPA_CLIENT_WLAN2_PROD;
983 	else
984 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
985 					IPA_CLIENT_WLAN1_PROD;
986 
987 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
988 		qdf_mem_get_dma_addr(soc->osdev,
989 				     &ipa_res->rx_rdy_ring.mem_info);
990 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
991 		qdf_mem_get_dma_size(soc->osdev,
992 				     &ipa_res->rx_rdy_ring.mem_info);
993 
994 	/* REO Tail Pointer Address */
995 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
996 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
997 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;
998 
999 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1000 		qdf_mem_get_dma_addr(soc->osdev,
1001 				     &ipa_res->rx_refill_ring.mem_info);
1002 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1003 		qdf_mem_get_dma_size(soc->osdev,
1004 				     &ipa_res->rx_refill_ring.mem_info);
1005 
1006 	/* FW Head Pointer Address */
1007 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1008 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1009 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;
1010 
1011 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
1012 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1013 }
1014 
1015 static void
1016 dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
1017 			  struct dp_ipa_resources *ipa_res,
1018 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
1019 			  bool over_gsi)
1020 {
1021 	struct tcl_data_cmd *tcl_desc_ptr;
1022 	uint8_t *desc_addr;
1023 	uint32_t desc_size;
1024 
1025 	if (over_gsi)
1026 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
1027 			IPA_CLIENT_WLAN2_CONS;
1028 	else
1029 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
1030 			IPA_CLIENT_WLAN1_CONS;
1031 
1032 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
1033 		     &ipa_res->tx_comp_ring.sgtable,
1034 		     sizeof(sgtable_t));
1035 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
1036 		qdf_mem_get_dma_size(soc->osdev,
1037 				     &ipa_res->tx_comp_ring.mem_info);
1038 	/* WBM Tail Pointer Address */
1039 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
1040 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1041 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;
1042 
1043 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
1044 		     &ipa_res->tx_ring.sgtable,
1045 		     sizeof(sgtable_t));
1046 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
1047 		qdf_mem_get_dma_size(soc->osdev,
1048 				     &ipa_res->tx_ring.mem_info);
1049 	/* TCL Head Pointer Address */
1050 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
1051 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1052 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;
1053 
1054 	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
1055 		ipa_res->tx_num_alloc_buffer;
1056 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;
1057 
1058 	/* Preprogram TCL descriptor */
1059 	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
1060 			tx_smmu);
1061 	desc_size = sizeof(struct tcl_data_cmd);
1062 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1063 	tcl_desc_ptr = (struct tcl_data_cmd *)
1064 		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
1065 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1066 		HAL_RX_BUF_RBM_SW2_BM;
1067 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1068 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1069 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1070 }
1071 
1072 static void
1073 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
1074 			  struct dp_ipa_resources *ipa_res,
1075 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
1076 			  bool over_gsi)
1077 {
1078 	if (over_gsi)
1079 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1080 					IPA_CLIENT_WLAN2_PROD;
1081 	else
1082 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1083 					IPA_CLIENT_WLAN1_PROD;
1084 
1085 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
1086 		     &ipa_res->rx_rdy_ring.sgtable,
1087 		     sizeof(sgtable_t));
1088 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
1089 		qdf_mem_get_dma_size(soc->osdev,
1090 				     &ipa_res->rx_rdy_ring.mem_info);
1091 	/* REO Tail Pointer Address */
1092 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
1093 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1094 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
1095 
1096 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
1097 		     &ipa_res->rx_refill_ring.sgtable,
1098 		     sizeof(sgtable_t));
1099 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
1100 		qdf_mem_get_dma_size(soc->osdev,
1101 				     &ipa_res->rx_refill_ring.mem_info);
1102 
1103 	/* FW Head Pointer Address */
1104 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
1105 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1106 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;
1107 
1108 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
1109 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1110 }
1111 
1112 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1113 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1114 			void *ipa_wdi_meter_notifier_cb,
1115 			uint32_t ipa_desc_size, void *ipa_priv,
1116 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1117 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1118 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
1119 {
1120 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1121 	struct dp_pdev *pdev =
1122 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1123 	struct dp_ipa_resources *ipa_res;
1124 	qdf_ipa_ep_cfg_t *tx_cfg;
1125 	qdf_ipa_ep_cfg_t *rx_cfg;
1126 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
1127 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
1128 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
1129 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
1130 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1131 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1132 	int ret;
1133 
1134 	if (!pdev) {
1135 		dp_err("%s invalid instance", __func__);
1136 		return QDF_STATUS_E_FAILURE;
1137 	}
1138 
1139 	ipa_res = &pdev->ipa_resource;
1140 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1141 		return QDF_STATUS_SUCCESS;
1142 
1143 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1144 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1145 
1146 	if (is_smmu_enabled)
1147 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
1148 	else
1149 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
1150 
1151 	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);
1152 
1153 	/* TX PIPE */
1154 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1155 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
1156 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
1157 	} else {
1158 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1159 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
1160 	}
1161 
1162 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
1163 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1164 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
1165 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
1166 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
1167 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
1168 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
1169 
1170 	/**
1171 	 * Transfer Ring: WBM Ring
1172 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1173 	 * Event Ring: TCL ring
1174 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1175 	 */
1176 	if (is_smmu_enabled)
1177 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
1178 	else
1179 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
1180 
1181 	/* RX PIPE */
1182 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1183 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
1184 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
1185 	} else {
1186 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1187 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
1188 	}
1189 
1190 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
1191 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1192 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
1193 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
1194 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
1195 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
1196 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
1197 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
1198 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
1199 
1200 	/**
1201 	 * Transfer Ring: REO Ring
1202 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1203 	 * Event Ring: FW ring
1204 	 * Event Ring Doorbell PA: FW Head Pointer Address
1205 	 */
1206 	if (is_smmu_enabled)
1207 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
1208 	else
1209 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
1210 
1211 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1212 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1213 
1214 	/* Connect WDI IPA PIPEs */
1215 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1216 
1217 	if (ret) {
1218 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1219 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1220 			  __func__, ret);
1221 		return QDF_STATUS_E_FAILURE;
1222 	}
1223 
1224 	/* IPA uC Doorbell registers */
1225 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
1226 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1227 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1228 
1229 	ipa_res->tx_comp_doorbell_paddr =
1230 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1231 	ipa_res->rx_ready_doorbell_paddr =
1232 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1233 
1234 	soc->ipa_first_tx_db_access = true;
1235 
1236 	return QDF_STATUS_SUCCESS;
1237 }
1238 
1239 /**
1240  * dp_ipa_setup_iface() - Setup IPA header and register interface
1241  * @ifname: Interface name
1242  * @mac_addr: Interface MAC address
1243  * @prod_client: IPA prod client type
1244  * @cons_client: IPA cons client type
1245  * @session_id: Session ID
1246  * @is_ipv6_enabled: Is IPV6 enabled or not
1247  *
1248  * Return: QDF_STATUS
1249  */
1250 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1251 			      qdf_ipa_client_type_t prod_client,
1252 			      qdf_ipa_client_type_t cons_client,
1253 			      uint8_t session_id, bool is_ipv6_enabled)
1254 {
1255 	qdf_ipa_wdi_reg_intf_in_params_t in;
1256 	qdf_ipa_wdi_hdr_info_t hdr_info;
1257 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1258 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1259 	int ret = -EINVAL;
1260 
1261 	dp_debug("Add Partial hdr: %s, %pM", ifname, mac_addr);
1262 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1263 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1264 
1265 	/* IPV4 header */
1266 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1267 
1268 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1269 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1270 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1271 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1272 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1273 
1274 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1275 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1276 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1277 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
1278 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
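	/*
	 * Encode the session id in bits 16-23 of the interface metadata
	 * (before the htonl byte swap); the mask selects only that byte.
	 */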
1279 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1280 		htonl(session_id << 16);
1281 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1282 
1283 	/* IPV6 header */
1284 	if (is_ipv6_enabled) {
1285 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1286 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1287 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1288 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1289 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1290 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1291 	}
1292 
1293 	dp_debug("registering for session_id: %u", session_id);
1294 
1295 	ret = qdf_ipa_wdi_reg_intf(&in);
1296 
1297 	if (ret) {
1298 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1299 		    "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
1300 		    __func__, ret);
1301 		return QDF_STATUS_E_FAILURE;
1302 	}
1303 
1304 	return QDF_STATUS_SUCCESS;
1305 }
1306 
1307 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1308 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1309 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1310 			void *ipa_wdi_meter_notifier_cb,
1311 			uint32_t ipa_desc_size, void *ipa_priv,
1312 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1313 			uint32_t *rx_pipe_handle)
1314 {
1315 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1316 	struct dp_pdev *pdev =
1317 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1318 	struct dp_ipa_resources *ipa_res;
1319 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1320 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1321 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1322 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1323 	struct tcl_data_cmd *tcl_desc_ptr;
1324 	uint8_t *desc_addr;
1325 	uint32_t desc_size;
1326 	int ret;
1327 
1328 	if (!pdev) {
1329 		dp_err("%s invalid instance", __func__);
1330 		return QDF_STATUS_E_FAILURE;
1331 	}
1332 
1333 	ipa_res = &pdev->ipa_resource;
1334 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1335 		return QDF_STATUS_SUCCESS;
1336 
1337 	/*
	 * tx and rx point into pipe_in once assigned below; zeroing pipe_in
	 * covers them, and qdf_mem_zero() on &tx/&rx with the struct size
	 * would overrun the pointer variables on the stack.
	 */
1339 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1340 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1341 
1342 	/* TX PIPE */
1343 	/**
1344 	 * Transfer Ring: WBM Ring
1345 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1346 	 * Event Ring: TCL ring
1347 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1348 	 */
1349 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1350 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
1351 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1352 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
1353 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
1354 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
1355 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
1356 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
1357 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
1358 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
1359 		ipa_res->tx_comp_ring_base_paddr;
1360 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
1361 		ipa_res->tx_comp_ring_size;
1362 	/* WBM Tail Pointer Address */
1363 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
1364 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1365 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
1366 		ipa_res->tx_ring_base_paddr;
1367 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
1368 	/* TCL Head Pointer Address */
1369 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
1370 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1371 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
1372 		ipa_res->tx_num_alloc_buffer;
1373 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
1374 
1375 	/* Preprogram TCL descriptor */
1376 	desc_addr =
1377 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
1378 	desc_size = sizeof(struct tcl_data_cmd);
1379 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1380 	tcl_desc_ptr = (struct tcl_data_cmd *)
1381 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
1382 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1383 						HAL_RX_BUF_RBM_SW2_BM;
1384 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1385 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1386 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1387 
1388 	/* RX PIPE */
1389 	/**
1390 	 * Transfer Ring: REO Ring
1391 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1392 	 * Event Ring: FW ring
1393 	 * Event Ring Doorbell PA: FW Head Pointer Address
1394 	 */
1395 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1396 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
1397 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1398 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
1399 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
1400 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
1401 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
1402 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
1403 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
1404 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
1405 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
1406 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1407 						ipa_res->rx_rdy_ring_base_paddr;
1408 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1409 						ipa_res->rx_rdy_ring_size;
1410 	/* REO Tail Pointer Address */
1411 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1412 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1413 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1414 					ipa_res->rx_refill_ring_base_paddr;
1415 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1416 						ipa_res->rx_refill_ring_size;
1417 	/* FW Head Pointer Address */
1418 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1419 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1420 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
1421 						L3_HEADER_PADDING;
1422 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1423 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1424 
1425 	/* Connect WDI IPA PIPE */
1426 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1427 	if (ret) {
1428 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1429 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1430 			  __func__, ret);
1431 		return QDF_STATUS_E_FAILURE;
1432 	}
1433 
1434 	/* IPA uC Doorbell registers */
1435 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1436 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
1437 		  __func__,
1438 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1439 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1440 
1441 	ipa_res->tx_comp_doorbell_paddr =
1442 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1443 	ipa_res->tx_comp_doorbell_vaddr =
1444 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
1445 	ipa_res->rx_ready_doorbell_paddr =
1446 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1447 
1448 	soc->ipa_first_tx_db_access = true;
1449 
1450 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1451 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1452 		  __func__,
1453 		  "transfer_ring_base_pa",
1454 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
1455 		  "transfer_ring_size",
1456 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
1457 		  "transfer_ring_doorbell_pa",
1458 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
1459 		  "event_ring_base_pa",
1460 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
1461 		  "event_ring_size",
1462 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
1463 		  "event_ring_doorbell_pa",
1464 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
1465 		  "num_pkt_buffers",
1466 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
1467 		  "tx_comp_doorbell_paddr",
1468 		  (void *)ipa_res->tx_comp_doorbell_paddr);
1469 
1470 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1471 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1472 		  __func__,
1473 		  "transfer_ring_base_pa",
1474 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1475 		  "transfer_ring_size",
1476 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1477 		  "transfer_ring_doorbell_pa",
1478 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1479 		  "event_ring_base_pa",
1480 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1481 		  "event_ring_size",
1482 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1483 		  "event_ring_doorbell_pa",
1484 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1485 		  "num_pkt_buffers",
1486 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1487 		  "tx_comp_doorbell_paddr",
1488 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1489 
1490 	return QDF_STATUS_SUCCESS;
1491 }
1492 
1493 /**
1494  * dp_ipa_setup_iface() - Setup IPA header and register interface
1495  * @ifname: Interface name
1496  * @mac_addr: Interface MAC address
1497  * @prod_client: IPA prod client type
1498  * @cons_client: IPA cons client type
1499  * @session_id: Session ID
1500  * @is_ipv6_enabled: Is IPV6 enabled or not
1501  *
1502  * Return: QDF_STATUS
1503  */
1504 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1505 			      qdf_ipa_client_type_t prod_client,
1506 			      qdf_ipa_client_type_t cons_client,
1507 			      uint8_t session_id, bool is_ipv6_enabled)
1508 {
1509 	qdf_ipa_wdi_reg_intf_in_params_t in;
1510 	qdf_ipa_wdi_hdr_info_t hdr_info;
1511 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1512 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1513 	int ret = -EINVAL;
1514 
1515 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1516 		  "%s: Add Partial hdr: %s, %pM",
1517 		  __func__, ifname, mac_addr);
1518 
1519 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1520 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1521 
1522 	/* IPV4 header */
1523 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1524 
1525 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1526 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1527 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1528 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1529 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1530 
1531 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1532 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1533 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1534 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1535 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1536 		htonl(session_id << 16);
1537 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1538 
1539 	/* IPV6 header */
1540 	if (is_ipv6_enabled) {
1541 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1542 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1543 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1544 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1545 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1546 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1547 	}
1548 
1549 	ret = qdf_ipa_wdi_reg_intf(&in);
1550 	if (ret) {
1551 		dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
1552 		       ret);
1553 		return QDF_STATUS_E_FAILURE;
1554 	}
1555 
1556 	return QDF_STATUS_SUCCESS;
1557 }
1558 
1559 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1560 
1561 /**
1562  * dp_ipa_cleanup() - Disconnect IPA pipes
1563  * @tx_pipe_handle: Tx pipe handle
1564  * @rx_pipe_handle: Rx pipe handle
1565  *
1566  * Return: QDF_STATUS
1567  */
1568 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1569 {
1570 	int ret;
1571 
1572 	ret = qdf_ipa_wdi_disconn_pipes();
1573 	if (ret) {
1574 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1575 		       ret);
1576 		return QDF_STATUS_E_FAILURE;
1577 	}
1578 
1579 	return QDF_STATUS_SUCCESS;
1580 }
1581 
1582 /**
1583  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1584  * @ifname: Interface name
1585  * @is_ipv6_enabled: Is IPV6 enabled or not
1586  *
1587  * Return: QDF_STATUS
1588  */
1589 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1590 {
1591 	int ret;
1592 
1593 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1594 	if (ret) {
1595 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1596 			  "%s: ipa_wdi_dereg_intf: IPA interface deregistration failed: ret=%d",
1597 			  __func__, ret);
1598 		return QDF_STATUS_E_FAILURE;
1599 	}
1600 
1601 	return QDF_STATUS_SUCCESS;
1602 }
1603 
1604 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1605 {
1606 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1607 	struct dp_pdev *pdev =
1608 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1609 	struct hal_srng *wbm_srng = (struct hal_srng *)
1610 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1611 	struct dp_ipa_resources *ipa_res;
1612 	QDF_STATUS result;
1613 
1614 	if (!pdev) {
1615 		dp_err("%s: invalid instance", __func__);
1616 		return QDF_STATUS_E_FAILURE;
1617 	}
1618 
1619 	ipa_res = &pdev->ipa_resource;
1620 
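	/*
	 * Mark IPA pipes as enabled and create SMMU mappings for the Rx
	 * buffer pool before handing the pipes over to IPA.
	 */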
1621 	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
1622 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
1623 
1624 	result = qdf_ipa_wdi_enable_pipes();
1625 	if (result) {
1626 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1627 			  "%s: Enable WDI PIPE failed, code %d",
1628 			  __func__, result);
1629 		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1630 		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1631 		return QDF_STATUS_E_FAILURE;
1632 	}
1633 
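	/*
	 * On the first doorbell access after setup, initialize the WBM Tx
	 * completion ring head pointer at the doorbell address shared with
	 * IPA so both sides start in sync.
	 */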
1634 	if (soc->ipa_first_tx_db_access) {
1635 		hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
1636 		soc->ipa_first_tx_db_access = false;
1637 	}
1638 
1639 	return QDF_STATUS_SUCCESS;
1640 }
1641 
1642 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1643 {
1644 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1645 	struct dp_pdev *pdev =
1646 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1647 	QDF_STATUS result;
1648 
1649 	if (!pdev) {
1650 		dp_err("%s: invalid instance", __func__);
1651 		return QDF_STATUS_E_FAILURE;
1652 	}
1653 
1654 	result = qdf_ipa_wdi_disable_pipes();
1655 	if (result) {
1656 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1657 			  "%s: Disable WDI PIPE failed, code %d",
1658 			  __func__, result);
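		/*
		 * Failing to disable the pipes leaves Tx/Rx in an
		 * inconsistent state; assert so it is caught immediately.
		 */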
1659 		qdf_assert_always(0);
1660 		return QDF_STATUS_E_FAILURE;
1661 	}
1662 
1663 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1664 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1665 
1666 	return QDF_STATUS_SUCCESS;
1667 }
1668 
1669 /**
1670  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1671  * @client: Client type
1672  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1673  *
1674  * Return: QDF_STATUS
1675  */
1676 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1677 {
1678 	qdf_ipa_wdi_perf_profile_t profile;
1679 	QDF_STATUS result;
1680 
1681 	profile.client = client;
1682 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1683 
1684 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1685 	if (result) {
1686 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1687 			  "%s: ipa_wdi_set_perf_profile failed, code %d",
1688 			  __func__, result);
1689 		return QDF_STATUS_E_FAILURE;
1690 	}
1691 
1692 	return QDF_STATUS_SUCCESS;
1693 }
1694 
1695 /**
1696  * dp_ipa_intrabss_send() - send IPA RX intra-bss frames
1697  * @pdev: physical device handle
1698  * @vdev: vdev on which the frame is forwarded
1699  * @nbuf: skb to send
1700  *
1701  * Return: nbuf if TX fails and NULL if TX succeeds
1702  */
1703 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1704 				       struct dp_vdev *vdev,
1705 				       qdf_nbuf_t nbuf)
1706 {
1707 	struct dp_peer *vdev_peer;
1708 	uint16_t len;
1709 
1710 	vdev_peer = vdev->vap_bss_peer;
1711 	if (qdf_unlikely(!vdev_peer))
1712 		return nbuf;
1713 
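	/* Clear the cb area (filled by the IPA driver on Rx) before Tx */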
1714 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1715 	len = qdf_nbuf_len(nbuf);
1716 
1717 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
1718 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1719 		return nbuf;
1720 	}
1721 
1722 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1723 	return NULL;
1724 }
1725 
1726 bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1727 			    qdf_nbuf_t nbuf, bool *fwd_success)
1728 {
1729 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1730 	struct dp_vdev *vdev =
1731 		dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1732 	struct dp_pdev *pdev;
1733 	struct dp_peer *da_peer;
1734 	struct dp_peer *sa_peer;
1735 	qdf_nbuf_t nbuf_copy;
1736 	uint8_t da_is_bcmc;
1737 	struct ethhdr *eh;
1738 
1739 	*fwd_success = false; /* set default as failure */
1740 
1741 	/*
1742 	 * WDI 3.0 skb->cb[] info from IPA driver
1743 	 * skb->cb[0] = vdev_id
1744 	 * skb->cb[1].bit#1 = da_is_bcmc
1745 	 */
1746 	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
1747 
1748 	if (qdf_unlikely(!vdev))
1749 		return false;
1750 
1751 	pdev = vdev->pdev;
1752 	if (qdf_unlikely(!pdev))
1753 		return false;
1754 
1755 	/* No forwarding in STA mode; just pass the frame up to the stack */
1756 	if (vdev->opmode == wlan_op_mode_sta)
1757 		return false;
1758 
1759 	if (da_is_bcmc) {
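		/*
		 * Broadcast/multicast: forward a copy within the BSS and
		 * still deliver the original frame to the stack.
		 */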
1760 		nbuf_copy = qdf_nbuf_copy(nbuf);
1761 		if (!nbuf_copy)
1762 			return false;
1763 
1764 		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
1765 			qdf_nbuf_free(nbuf_copy);
1766 		else
1767 			*fwd_success = true;
1768 
1769 		/* return false to pass original pkt up to stack */
1770 		return false;
1771 	}
1772 
1773 	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
1774 
1775 	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
1776 		return false;
1777 
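	/*
	 * Forward only when both DA and SA map to peers on this vdev,
	 * i.e. the frame is genuinely intra-BSS traffic.
	 */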
1778 	da_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_dest);
1779 	if (!da_peer)
1780 		return false;
1781 
1782 	if (da_peer->vdev != vdev)
1783 		return false;
1784 
1785 	sa_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_source);
1786 	if (!sa_peer)
1787 		return false;
1788 
1789 	if (sa_peer->vdev != vdev)
1790 		return false;
1791 
1792 	/*
1793 	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
1794 	 * Need to add skb to internal tracking table to avoid nbuf memory
1795 	 * leak check for unallocated skb.
1796 	 */
1797 	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);
1798 
1799 	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
1800 		qdf_nbuf_free(nbuf);
1801 	else
1802 		*fwd_success = true;
1803 
1804 	return true;
1805 }
1806 
1807 #ifdef MDM_PLATFORM
1808 bool dp_ipa_is_mdm_platform(void)
1809 {
1810 	return true;
1811 }
1812 #else
1813 bool dp_ipa_is_mdm_platform(void)
1814 {
1815 	return false;
1816 }
1817 #endif
1818 
1819 /**
1820  * dp_ipa_frag_nbuf_linearize() - linearize a fragmented nbuf for IPA
1821  * @soc: soc handle
1822  * @nbuf: source (fragmented) skb
1823  *
1824  * Return: new linear nbuf on success, otherwise NULL
1825  */
1826 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
1827 					     qdf_nbuf_t nbuf)
1828 {
1829 	uint8_t *src_nbuf_data;
1830 	uint8_t *dst_nbuf_data;
1831 	qdf_nbuf_t dst_nbuf;
1832 	qdf_nbuf_t temp_nbuf = nbuf;
1833 	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
1834 	bool is_nbuf_head = true;
1835 	uint32_t copy_len = 0;
1836 
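	/*
	 * Copy all fragments into a single RX-sized buffer; frames that do
	 * not fit (jumbo) cannot be linearized and are dropped.
	 */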
1837 	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
1838 				  RX_BUFFER_RESERVATION,
1839 				  RX_DATA_BUFFER_ALIGNMENT, FALSE);
1840 
1841 	if (!dst_nbuf) {
1842 		dp_err_rl("nbuf allocate fail");
1843 		return NULL;
1844 	}
1845 
1846 	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
1847 		qdf_nbuf_free(dst_nbuf);
1848 		dp_err_rl("nbuf is jumbo data");
1849 		return NULL;
1850 	}
1851 
1852 	/* prepare to copy all data into new skb */
1853 	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
1854 	while (temp_nbuf) {
1855 		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
1856 		/* first head nbuf */
1857 		if (is_nbuf_head) {
1858 			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
1859 				     RX_PKT_TLVS_LEN);
1860 			/* leave extra 2 bytes L3_HEADER_PADDING */
1861 			dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING);
1862 			src_nbuf_data += RX_PKT_TLVS_LEN;
1863 			copy_len = qdf_nbuf_headlen(temp_nbuf) -
1864 						RX_PKT_TLVS_LEN;
1865 			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
1866 			is_nbuf_head = false;
1867 		} else {
1868 			copy_len = qdf_nbuf_len(temp_nbuf);
1869 			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
1870 		}
1871 		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
1872 		dst_nbuf_data += copy_len;
1873 	}
1874 
1875 	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
1876 	/* copy is done, free original nbuf */
1877 	qdf_nbuf_free(nbuf);
1878 
1879 	return dst_nbuf;
1880 }
1881 
1882 /**
1883  * dp_ipa_handle_rx_reo_reinject() - Handle RX REO reinject skb buffer
1884  * @soc: soc handle
1885  * @nbuf: skb to be reinjected
1886  *
1887  * Return: nbuf (linearized if fragmented) on success, otherwise NULL
1888  */
1889 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
1890 {
1891 {
1893 		return nbuf;
1894 
1895 	/* WLAN IPA is run-time disabled */
1896 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
1897 		return nbuf;
1898 
1899 	if (!qdf_nbuf_is_frag(nbuf))
1900 		return nbuf;
1901 
1902 	/* linearize skb for IPA */
1903 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
1904 }
1905 
1906 #endif
1907