xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_rx.h"
32 #include "dp_ipa.h"
33 
34 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
35 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
36 
37 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
38  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
39  * This causes back pressure, resulting in a FW crash.
40  * By leaving some entries with no buffer attached, WBM will be able to write
41  * to the ring, and from dumps we can figure out the buffer which is causing
42  * this issue.
43  */
44 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
45 
46 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
47 						   qdf_nbuf_t nbuf,
48 						   bool create)
49 {
50 	qdf_mem_info_t mem_map_table = {0};
51 
52 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
53 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
54 				 skb_end_pointer(nbuf) - nbuf->data);
55 
56 	if (create)
57 		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
58 	else
59 		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
60 
61 	return QDF_STATUS_SUCCESS;
62 }
63 
64 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
65 					     qdf_nbuf_t nbuf,
66 					     bool create)
67 {
68 	struct dp_pdev *pdev;
69 	int i;
70 
71 	for (i = 0; i < soc->pdev_count; i++) {
72 		pdev = soc->pdev_list[i];
73 		if (pdev && pdev->monitor_configured)
74 			return QDF_STATUS_SUCCESS;
75 	}
76 
77 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
78 	    !qdf_mem_smmu_s1_enabled(soc->osdev))
79 		return QDF_STATUS_SUCCESS;
80 
81 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
82 		return QDF_STATUS_SUCCESS;
83 
84 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
85 }
86 
87 #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - (Un)map all in-use Rx
 *	buffers of a pdev's Rx descriptor pool with the IPA SMMU
 * @soc: data path SoC handle
 * @pdev: physical device whose Rx descriptor pool is walked
 * @create: true to create SMMU mappings, false to release them
 *
 * Multi-page descriptor variant: descriptors live in an array of
 * cacheable pages, so each linear index is translated to a
 * (page_id, offset) pair before lookup. The pool lock is held for the
 * entire walk.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	/* Nothing to do when stage-1 SMMU translation is not active */
	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Stop the walk if the descriptor page array is gone */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		/* Only descriptors with a live, still-mapped buffer qualify */
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return QDF_STATUS_SUCCESS;
}
126 #else
127 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
128 							 struct dp_pdev *pdev,
129 							 bool create)
130 {
131 	struct rx_desc_pool *rx_pool;
132 	uint8_t pdev_id;
133 	qdf_nbuf_t nbuf;
134 	int i;
135 
136 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
137 		return QDF_STATUS_SUCCESS;
138 
139 	pdev_id = pdev->pdev_id;
140 	rx_pool = &soc->rx_desc_buf[pdev_id];
141 
142 	qdf_spin_lock_bh(&rx_pool->lock);
143 	for (i = 0; i < rx_pool->pool_size; i++) {
144 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
145 		    rx_pool->array[i].rx_desc.unmapped)
146 			continue;
147 
148 		nbuf = rx_pool->array[i].rx_desc.nbuf;
149 
150 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
151 	}
152 	qdf_spin_unlock_bh(&rx_pool->lock);
153 
154 	return QDF_STATUS_SUCCESS;
155 }
156 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
157 
158 /**
159  * dp_tx_ipa_uc_detach - Free autonomy TX resources
160  * @soc: data path instance
161  * @pdev: core txrx pdev context
162  *
163  * Free allocated TX buffers with WBM SRNG
164  *
165  * Return: none
166  */
167 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
168 {
169 	int idx;
170 	qdf_nbuf_t nbuf;
171 	struct dp_ipa_resources *ipa_res;
172 
173 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
174 		nbuf = (qdf_nbuf_t)
175 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
176 		if (!nbuf)
177 			continue;
178 
179 		if (qdf_mem_smmu_s1_enabled(soc->osdev))
180 			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, false);
181 
182 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
183 		qdf_nbuf_free(nbuf);
184 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
185 						(void *)NULL;
186 	}
187 
188 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
189 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
190 
191 	ipa_res = &pdev->ipa_resource;
192 	iounmap(ipa_res->tx_comp_doorbell_vaddr);
193 
194 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
195 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
196 }
197 
198 /**
199  * dp_rx_ipa_uc_detach - free autonomy RX resources
200  * @soc: data path instance
201  * @pdev: core txrx pdev context
202  *
203  * This function will detach DP RX into main device context
204  * will free DP Rx resources.
205  *
206  * Return: none
207  */
208 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
209 {
210 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
211 
212 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
213 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
214 }
215 
216 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
217 {
218 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
219 		return QDF_STATUS_SUCCESS;
220 
221 	/* TX resource detach */
222 	dp_tx_ipa_uc_detach(soc, pdev);
223 
224 	/* RX resource detach */
225 	dp_rx_ipa_uc_detach(soc, pdev);
226 
227 	return QDF_STATUS_SUCCESS;	/* success */
228 }
229 
/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffer from non-cacheable memory
 * Attach allocated TX buffers with WBM SRNG
 *
 * Return: QDF_STATUS_SUCCESS on success, -EINVAL / -ENOMEM on failure
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave the WAR entries unpopulated; see
	 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES above.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pointer pool is sized for the full ring, not just max_alloc_count */
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		/*
		 * NOTE(review): qdf_nbuf_map_single() return value is not
		 * checked here; a failed DMA map would post a stale paddr
		 * below — confirm whether mapping can fail on this path.
		 */
		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		/* Split paddr: low 32 bits and bits [36:32] per the masks */
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
				      HAL_WBM_SW0_BM_ID));

		/* Remember the buffer so dp_tx_ipa_uc_detach() can free it */
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;

		if (qdf_mem_smmu_s1_enabled(soc->osdev))
			__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, true);
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
346 
347 /**
348  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
349  * @soc: data path instance
350  * @pdev: core txrx pdev context
351  *
352  * This function will attach a DP RX instance into the main
353  * device (SOC) context.
354  *
355  * Return: QDF_STATUS_SUCCESS: success
356  *         QDF_STATUS_E_RESOURCES: Error return
357  */
358 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
359 {
360 	return QDF_STATUS_SUCCESS;
361 }
362 
363 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
364 {
365 	int error;
366 
367 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
368 		return QDF_STATUS_SUCCESS;
369 
370 	/* TX resource attach */
371 	error = dp_tx_ipa_uc_attach(soc, pdev);
372 	if (error) {
373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
374 			  "%s: DP IPA UC TX attach fail code %d",
375 			  __func__, error);
376 		return error;
377 	}
378 
379 	/* RX resource attach */
380 	error = dp_rx_ipa_uc_attach(soc, pdev);
381 	if (error) {
382 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
383 			  "%s: DP IPA UC RX attach fail code %d",
384 			  __func__, error);
385 		dp_tx_ipa_uc_detach(soc, pdev);
386 		return error;
387 	}
388 
389 	return QDF_STATUS_SUCCESS;	/* success */
390 }
391 
/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: physical device handle (provides the second Rx refill ring)
 *
 * Records base address (virtual and physical), byte size and head/tail
 * pointer physical addresses of the SRNGs shared with IPA — TCL data,
 * Tx completion (WBM), REO destination and Rx refill — into
 * soc->ipa_uc_tx_rsc / soc->ipa_uc_rx_rsc for later handover to IPA.
 *
 * Return: 0; QDF_STATUS_SUCCESS when IPA is disabled in cfg
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* Ring size in bytes; entry_size appears to be in 4-byte units */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	/* HP register's offset from the device base, rebased onto mem_pa */
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* Destination ring, so record the TP register address instead */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* IPA Rx refill buffer ring (second refill ring of the pdev) */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* HP lives in host memory here, so translate dmaaddr -> paddr */
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	return 0;
}
523 
524 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
525 					     qdf_shared_mem_t *shared_mem,
526 					     void *cpu_addr,
527 					     qdf_dma_addr_t dma_addr,
528 					     uint32_t size)
529 {
530 	qdf_dma_addr_t paddr;
531 	int ret;
532 
533 	shared_mem->vaddr = cpu_addr;
534 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
535 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
536 
537 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
538 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
539 
540 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
541 				      shared_mem->vaddr, dma_addr, size);
542 	if (ret) {
543 		dp_err("Unable to get DMA sgtable");
544 		return QDF_STATUS_E_NOMEM;
545 	}
546 
547 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
548 
549 	return QDF_STATUS_SUCCESS;
550 }
551 
552 /**
553  * dp_ipa_uc_get_resource() - Client request resource information
554  * @ppdev - handle to the device instance
555  *
556  *  IPA client will request IPA UC related resource information
557  *  Resource information will be distributed to IPA module
558  *  All of the required resources should be pre-allocated
559  *
560  * Return: QDF_STATUS
561  */
562 QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
563 {
564 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
565 	struct dp_soc *soc = pdev->soc;
566 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
567 
568 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
569 		return QDF_STATUS_SUCCESS;
570 
571 	ipa_res->tx_num_alloc_buffer =
572 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
573 
574 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
575 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
576 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
577 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
578 
579 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
580 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
581 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
582 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
583 
584 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
585 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
586 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
587 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
588 
589 	dp_ipa_get_shared_mem_info(
590 			soc->osdev, &ipa_res->rx_refill_ring,
591 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
592 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
593 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
594 
595 	if (!qdf_mem_get_dma_addr(soc->osdev,
596 				  &ipa_res->tx_comp_ring.mem_info) ||
597 	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info))
598 		return QDF_STATUS_E_FAILURE;
599 
600 	return QDF_STATUS_SUCCESS;
601 }
602 
/**
 * dp_ipa_set_doorbell_paddr () - Set doorbell register physical address to SRNG
 * @ppdev - handle to the device instance
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	uint32_t tx_comp_doorbell_dmaaddr;
	uint32_t rx_ready_doorbell_dmaaddr;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/*
	 * Map the doorbell register so the CPU can initialize the WBM
	 * head pointer via hal_srng_dst_init_hp() below.
	 * NOTE(review): ioremap() result is not NULL-checked before use,
	 * and the pld_smmu_map() return values below are ignored —
	 * confirm failure handling on this path.
	 */
	ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		/* Replace the doorbell PAs with their SMMU DMA addresses */
		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;

		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
	}

	/* WBM head pointer updates now land on IPA's Tx comp doorbell */
	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_comp_doorbell_paddr,
		(void *)ipa_res->tx_comp_doorbell_vaddr);

	hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell
	 * to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}
658 
659 /**
660  * dp_ipa_op_response() - Handle OP command response from firmware
661  * @ppdev - handle to the device instance
662  * @op_msg: op response message from firmware
663  *
664  * Return: none
665  */
666 QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
667 {
668 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
669 
670 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
671 		return QDF_STATUS_SUCCESS;
672 
673 	if (pdev->ipa_uc_op_cb) {
674 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
675 	} else {
676 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
677 		    "%s: IPA callback function is not registered", __func__);
678 		qdf_mem_free(op_msg);
679 		return QDF_STATUS_E_FAILURE;
680 	}
681 
682 	return QDF_STATUS_SUCCESS;
683 }
684 
685 /**
686  * dp_ipa_register_op_cb() - Register OP handler function
687  * @ppdev - handle to the device instance
688  * @op_cb: handler function pointer
689  *
690  * Return: none
691  */
692 QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
693 				 ipa_uc_op_cb_type op_cb,
694 				 void *usr_ctxt)
695 {
696 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
697 
698 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
699 		return QDF_STATUS_SUCCESS;
700 
701 	pdev->ipa_uc_op_cb = op_cb;
702 	pdev->usr_ctxt = usr_ctxt;
703 
704 	return QDF_STATUS_SUCCESS;
705 }
706 
707 /**
708  * dp_ipa_get_stat() - Get firmware wdi status
709  * @ppdev - handle to the device instance
710  *
711  * Return: none
712  */
713 QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
714 {
715 	/* TBD */
716 	return QDF_STATUS_SUCCESS;
717 }
718 
719 /**
720  * dp_tx_send_ipa_data_frame() - send IPA data frame
721  * @vdev: vdev
722  * @skb: skb
723  *
724  * Return: skb/ NULL is for success
725  */
726 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
727 {
728 	qdf_nbuf_t ret;
729 
730 	/* Terminate the (single-element) list of tx frames */
731 	qdf_nbuf_set_next(skb, NULL);
732 	ret = dp_tx_send(vdev, skb);
733 	if (ret) {
734 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
735 			  "%s: Failed to tx", __func__);
736 		return ret;
737 	}
738 
739 	return NULL;
740 }
741 
742 /**
743  * dp_ipa_enable_autonomy() – Enable autonomy RX path
744  * @pdev - handle to the device instance
745  *
746  * Set all RX packet route to IPA REO ring
747  * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
748  * Return: none
749  */
750 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
751 {
752 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
753 	struct dp_soc *soc = pdev->soc;
754 	uint32_t ix0;
755 	uint32_t ix2;
756 
757 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
758 		return QDF_STATUS_SUCCESS;
759 
760 	/* Call HAL API to remap REO rings to REO2IPA ring */
761 	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
762 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
763 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 2) |
764 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
765 	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
766 	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
767 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
768 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);
769 
770 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
771 		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
772 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
773 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
774 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
775 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
776 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
777 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
778 		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
779 	}
780 
781 	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
782 				   &ix2, &ix2);
783 
784 	return QDF_STATUS_SUCCESS;
785 }
786 
787 /**
788  * dp_ipa_disable_autonomy() – Disable autonomy RX path
789  * @ppdev - handle to the device instance
790  *
791  * Disable RX packet routing to IPA REO
792  * Program Destination_Ring_Ctrl_IX_0 REO register to disable
793  * Return: none
794  */
795 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
796 {
797 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
798 	struct dp_soc *soc = pdev->soc;
799 	uint32_t ix0;
800 	uint32_t ix2;
801 	uint32_t ix3;
802 
803 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
804 		return QDF_STATUS_SUCCESS;
805 
806 	/* Call HAL API to remap REO rings to REO2IPA ring */
807 	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
808 	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
809 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
810 	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
811 	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
812 	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
813 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
814 	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);
815 
816 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
817 		dp_reo_remap_config(soc, &ix2, &ix3);
818 
819 		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
820 					   &ix2, &ix3);
821 	}
822 
823 	return QDF_STATUS_SUCCESS;
824 }
825 
826 /* This should be configurable per H/W configuration enable status */
827 #define L3_HEADER_PADDING	2
828 
829 #ifdef CONFIG_IPA_WDI_UNIFIED_API
830 
#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_setup_mcc_sys_pipes() - Populate IPA sys pipe params for MCC
 * @sys_in: array of DP_IPA_MAX_IFACE sys pipe connect params
 * @pipe_in: WDI connection parameters to fill
 *
 * Copies one sys pipe configuration per interface into @pipe_in and
 * sets the needed-pipe count to DP_IPA_MAX_IFACE.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	/* Setup MCC sys pipe */
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
			DP_IPA_MAX_IFACE;
	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
}
#else
/**
 * dp_setup_mcc_sys_pipes() - MCC sys pipes unused with Tx flow ctrl v2
 * @sys_in: unused
 * @pipe_in: WDI connection parameters; sys pipe count is set to zero
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
#endif
851 
/**
 * dp_ipa_wdi_tx_params() - Fill WDI pipe setup info for the Tx path
 * @soc: data path SoC handle
 * @ipa_res: pre-allocated IPA resources of the pdev
 * @tx: IPA WDI Tx pipe setup info to populate
 * @over_gsi: true selects IPA_CLIENT_WLAN2_CONS, else WLAN1_CONS
 *
 * Transfer ring is the WBM Tx completion ring and event ring is the
 * TCL data ring; both doorbells are PCIe register addresses. Also
 * pre-programs the TCL descriptor template IPA uses for Tx packets.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	/* No extra header offset for Tx packets */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	/* Command body starts right after the TLV header */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
908 
/**
 * dp_ipa_wdi_rx_params() - Fill WDI pipe setup info for the Rx path
 * @soc: data path SoC handle
 * @ipa_res: pre-allocated IPA resources of the pdev
 * @rx: IPA WDI Rx pipe setup info to populate
 * @over_gsi: true selects IPA_CLIENT_WLAN2_PROD, else WLAN1_PROD
 *
 * Transfer ring is the REO2IPA destination ring (PCIe doorbell) and
 * event ring is the Rx refill ring (host-memory doorbell). The packet
 * payload starts after the Rx TLVs plus L3 alignment padding.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}
948 
949 static void
950 dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
951 			  struct dp_ipa_resources *ipa_res,
952 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
953 			  bool over_gsi)
954 {
955 	struct tcl_data_cmd *tcl_desc_ptr;
956 	uint8_t *desc_addr;
957 	uint32_t desc_size;
958 
959 	if (over_gsi)
960 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
961 			IPA_CLIENT_WLAN2_CONS;
962 	else
963 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
964 			IPA_CLIENT_WLAN1_CONS;
965 
966 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
967 		     &ipa_res->tx_comp_ring.sgtable,
968 		     sizeof(sgtable_t));
969 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
970 		qdf_mem_get_dma_size(soc->osdev,
971 				     &ipa_res->tx_comp_ring.mem_info);
972 	/* WBM Tail Pointer Address */
973 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
974 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
975 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;
976 
977 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
978 		     &ipa_res->tx_ring.sgtable,
979 		     sizeof(sgtable_t));
980 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
981 		qdf_mem_get_dma_size(soc->osdev,
982 				     &ipa_res->tx_ring.mem_info);
983 	/* TCL Head Pointer Address */
984 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
985 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
986 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;
987 
988 	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
989 		ipa_res->tx_num_alloc_buffer;
990 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;
991 
992 	/* Preprogram TCL descriptor */
993 	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
994 			tx_smmu);
995 	desc_size = sizeof(struct tcl_data_cmd);
996 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
997 	tcl_desc_ptr = (struct tcl_data_cmd *)
998 		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
999 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1000 		HAL_RX_BUF_RBM_SW2_BM;
1001 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1002 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1003 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1004 }
1005 
1006 static void
1007 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
1008 			  struct dp_ipa_resources *ipa_res,
1009 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
1010 			  bool over_gsi)
1011 {
1012 	if (over_gsi)
1013 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1014 					IPA_CLIENT_WLAN2_PROD;
1015 	else
1016 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1017 					IPA_CLIENT_WLAN1_PROD;
1018 
1019 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
1020 		     &ipa_res->rx_rdy_ring.sgtable,
1021 		     sizeof(sgtable_t));
1022 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
1023 		qdf_mem_get_dma_size(soc->osdev,
1024 				     &ipa_res->rx_rdy_ring.mem_info);
1025 	/* REO Tail Pointer Address */
1026 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
1027 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1028 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
1029 
1030 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
1031 		     &ipa_res->rx_refill_ring.sgtable,
1032 		     sizeof(sgtable_t));
1033 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
1034 		qdf_mem_get_dma_size(soc->osdev,
1035 				     &ipa_res->rx_refill_ring.mem_info);
1036 
1037 	/* FW Head Pointer Address */
1038 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
1039 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1040 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;
1041 
1042 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
1043 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1044 }
1045 
1046 /**
1047  * dp_ipa_setup() - Setup and connect IPA pipes
1048  * @ppdev - handle to the device instance
1049  * @ipa_i2w_cb: IPA to WLAN callback
1050  * @ipa_w2i_cb: WLAN to IPA callback
1051  * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
1052  * @ipa_desc_size: IPA descriptor size
1053  * @ipa_priv: handle to the HTT instance
1054  * @is_rm_enabled: Is IPA RM enabled or not
1055  * @tx_pipe_handle: pointer to Tx pipe handle
1056  * @rx_pipe_handle: pointer to Rx pipe handle
1057  * @is_smmu_enabled: Is SMMU enabled or not
1058  * @sys_in: parameters to setup sys pipe in mcc mode
1059  *
1060  * Return: QDF_STATUS
1061  */
1062 QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
1063 			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1064 			uint32_t ipa_desc_size, void *ipa_priv,
1065 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1066 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1067 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
1068 {
1069 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1070 	struct dp_soc *soc = pdev->soc;
1071 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1072 	qdf_ipa_ep_cfg_t *tx_cfg;
1073 	qdf_ipa_ep_cfg_t *rx_cfg;
1074 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
1075 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
1076 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
1077 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
1078 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1079 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1080 	int ret;
1081 
1082 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1083 		return QDF_STATUS_SUCCESS;
1084 
1085 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1086 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1087 
1088 	if (is_smmu_enabled)
1089 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
1090 	else
1091 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
1092 
1093 	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);
1094 
1095 	/* TX PIPE */
1096 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1097 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
1098 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
1099 	} else {
1100 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1101 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
1102 	}
1103 
1104 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
1105 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1106 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
1107 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
1108 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
1109 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
1110 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
1111 
1112 	/**
1113 	 * Transfer Ring: WBM Ring
1114 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1115 	 * Event Ring: TCL ring
1116 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1117 	 */
1118 	if (is_smmu_enabled)
1119 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
1120 	else
1121 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
1122 
1123 	/* RX PIPE */
1124 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1125 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
1126 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
1127 	} else {
1128 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1129 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
1130 	}
1131 
1132 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
1133 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1134 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
1135 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
1136 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
1137 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
1138 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
1139 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
1140 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
1141 
1142 	/**
1143 	 * Transfer Ring: REO Ring
1144 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1145 	 * Event Ring: FW ring
1146 	 * Event Ring Doorbell PA: FW Head Pointer Address
1147 	 */
1148 	if (is_smmu_enabled)
1149 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
1150 	else
1151 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
1152 
1153 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1154 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1155 
1156 	/* Connect WDI IPA PIPEs */
1157 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1158 
1159 	if (ret) {
1160 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1161 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1162 			  __func__, ret);
1163 		return QDF_STATUS_E_FAILURE;
1164 	}
1165 
1166 	/* IPA uC Doorbell registers */
1167 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
1168 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1169 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1170 
1171 	ipa_res->tx_comp_doorbell_paddr =
1172 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1173 	ipa_res->rx_ready_doorbell_paddr =
1174 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1175 
1176 	return QDF_STATUS_SUCCESS;
1177 }
1178 
1179 /**
1180  * dp_ipa_setup_iface() - Setup IPA header and register interface
1181  * @ifname: Interface name
1182  * @mac_addr: Interface MAC address
1183  * @prod_client: IPA prod client type
1184  * @cons_client: IPA cons client type
1185  * @session_id: Session ID
1186  * @is_ipv6_enabled: Is IPV6 enabled or not
1187  *
1188  * Return: QDF_STATUS
1189  */
1190 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1191 			      qdf_ipa_client_type_t prod_client,
1192 			      qdf_ipa_client_type_t cons_client,
1193 			      uint8_t session_id, bool is_ipv6_enabled)
1194 {
1195 	qdf_ipa_wdi_reg_intf_in_params_t in;
1196 	qdf_ipa_wdi_hdr_info_t hdr_info;
1197 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1198 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1199 	int ret = -EINVAL;
1200 
1201 	dp_debug("Add Partial hdr: %s, %pM", ifname, mac_addr);
1202 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1203 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1204 
1205 	/* IPV4 header */
1206 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1207 
1208 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1209 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1210 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1211 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1212 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1213 
1214 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1215 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1216 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1217 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
1218 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1219 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1220 		htonl(session_id << 16);
1221 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1222 
1223 	/* IPV6 header */
1224 	if (is_ipv6_enabled) {
1225 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1226 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1227 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1228 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1229 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1230 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1231 	}
1232 
1233 	dp_debug("registering for session_id: %u", session_id);
1234 
1235 	ret = qdf_ipa_wdi_reg_intf(&in);
1236 
1237 	if (ret) {
1238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1239 		    "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1240 		    __func__, ret);
1241 		return QDF_STATUS_E_FAILURE;
1242 	}
1243 
1244 	return QDF_STATUS_SUCCESS;
1245 }
1246 
1247 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1248 
1249 /**
1250  * dp_ipa_setup() - Setup and connect IPA pipes
1251  * @ppdev - handle to the device instance
1252  * @ipa_i2w_cb: IPA to WLAN callback
1253  * @ipa_w2i_cb: WLAN to IPA callback
1254  * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
1255  * @ipa_desc_size: IPA descriptor size
1256  * @ipa_priv: handle to the HTT instance
1257  * @is_rm_enabled: Is IPA RM enabled or not
1258  * @tx_pipe_handle: pointer to Tx pipe handle
1259  * @rx_pipe_handle: pointer to Rx pipe handle
1260  *
1261  * Return: QDF_STATUS
1262  */
1263 QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
1264 			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
1265 			uint32_t ipa_desc_size, void *ipa_priv,
1266 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1267 			uint32_t *rx_pipe_handle)
1268 {
1269 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1270 	struct dp_soc *soc = pdev->soc;
1271 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1272 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1273 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1274 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1275 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1276 	struct tcl_data_cmd *tcl_desc_ptr;
1277 	uint8_t *desc_addr;
1278 	uint32_t desc_size;
1279 	int ret;
1280 
1281 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1282 		return QDF_STATUS_SUCCESS;
1283 
1284 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1285 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1286 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1287 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1288 
1289 	/* TX PIPE */
1290 	/**
1291 	 * Transfer Ring: WBM Ring
1292 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1293 	 * Event Ring: TCL ring
1294 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1295 	 */
1296 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1297 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
1298 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1299 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
1300 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
1301 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
1302 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
1303 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
1304 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
1305 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
1306 		ipa_res->tx_comp_ring_base_paddr;
1307 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
1308 		ipa_res->tx_comp_ring_size;
1309 	/* WBM Tail Pointer Address */
1310 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
1311 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1312 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
1313 		ipa_res->tx_ring_base_paddr;
1314 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
1315 	/* TCL Head Pointer Address */
1316 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
1317 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1318 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
1319 		ipa_res->tx_num_alloc_buffer;
1320 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
1321 
1322 	/* Preprogram TCL descriptor */
1323 	desc_addr =
1324 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
1325 	desc_size = sizeof(struct tcl_data_cmd);
1326 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1327 	tcl_desc_ptr = (struct tcl_data_cmd *)
1328 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
1329 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1330 						HAL_RX_BUF_RBM_SW2_BM;
1331 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1332 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1333 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1334 
1335 	/* RX PIPE */
1336 	/**
1337 	 * Transfer Ring: REO Ring
1338 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1339 	 * Event Ring: FW ring
1340 	 * Event Ring Doorbell PA: FW Head Pointer Address
1341 	 */
1342 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1343 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
1344 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1345 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
1346 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
1347 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
1348 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
1349 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
1350 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
1351 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
1352 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
1353 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1354 						ipa_res->rx_rdy_ring_base_paddr;
1355 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1356 						ipa_res->rx_rdy_ring_size;
1357 	/* REO Tail Pointer Address */
1358 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1359 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1360 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1361 					ipa_res->rx_refill_ring_base_paddr;
1362 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1363 						ipa_res->rx_refill_ring_size;
1364 	/* FW Head Pointer Address */
1365 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1366 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1367 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
1368 						L3_HEADER_PADDING;
1369 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1370 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1371 
1372 	/* Connect WDI IPA PIPE */
1373 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1374 	if (ret) {
1375 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1376 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1377 			  __func__, ret);
1378 		return QDF_STATUS_E_FAILURE;
1379 	}
1380 
1381 	/* IPA uC Doorbell registers */
1382 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1383 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
1384 		  __func__,
1385 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1386 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1387 
1388 	ipa_res->tx_comp_doorbell_paddr =
1389 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1390 	ipa_res->tx_comp_doorbell_vaddr =
1391 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
1392 	ipa_res->rx_ready_doorbell_paddr =
1393 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1394 
1395 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1396 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1397 		  __func__,
1398 		  "transfer_ring_base_pa",
1399 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
1400 		  "transfer_ring_size",
1401 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
1402 		  "transfer_ring_doorbell_pa",
1403 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
1404 		  "event_ring_base_pa",
1405 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
1406 		  "event_ring_size",
1407 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
1408 		  "event_ring_doorbell_pa",
1409 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
1410 		  "num_pkt_buffers",
1411 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
1412 		  "tx_comp_doorbell_paddr",
1413 		  (void *)ipa_res->tx_comp_doorbell_paddr);
1414 
1415 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1416 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1417 		  __func__,
1418 		  "transfer_ring_base_pa",
1419 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1420 		  "transfer_ring_size",
1421 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1422 		  "transfer_ring_doorbell_pa",
1423 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1424 		  "event_ring_base_pa",
1425 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1426 		  "event_ring_size",
1427 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1428 		  "event_ring_doorbell_pa",
1429 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1430 		  "num_pkt_buffers",
1431 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1432 		  "tx_comp_doorbell_paddr",
1433 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1434 
1435 	return QDF_STATUS_SUCCESS;
1436 }
1437 
1438 /**
1439  * dp_ipa_setup_iface() - Setup IPA header and register interface
1440  * @ifname: Interface name
1441  * @mac_addr: Interface MAC address
1442  * @prod_client: IPA prod client type
1443  * @cons_client: IPA cons client type
1444  * @session_id: Session ID
1445  * @is_ipv6_enabled: Is IPV6 enabled or not
1446  *
1447  * Return: QDF_STATUS
1448  */
1449 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1450 			      qdf_ipa_client_type_t prod_client,
1451 			      qdf_ipa_client_type_t cons_client,
1452 			      uint8_t session_id, bool is_ipv6_enabled)
1453 {
1454 	qdf_ipa_wdi_reg_intf_in_params_t in;
1455 	qdf_ipa_wdi_hdr_info_t hdr_info;
1456 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1457 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1458 	int ret = -EINVAL;
1459 
1460 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1461 		  "%s: Add Partial hdr: %s, %pM",
1462 		  __func__, ifname, mac_addr);
1463 
1464 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1465 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1466 
1467 	/* IPV4 header */
1468 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1469 
1470 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1471 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1472 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1473 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1474 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1475 
1476 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1477 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1478 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1479 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1480 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1481 		htonl(session_id << 16);
1482 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1483 
1484 	/* IPV6 header */
1485 	if (is_ipv6_enabled) {
1486 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1487 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1488 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1489 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1490 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1491 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1492 	}
1493 
1494 	ret = qdf_ipa_wdi_reg_intf(&in);
1495 	if (ret) {
1496 		dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1497 		       ret);
1498 		return QDF_STATUS_E_FAILURE;
1499 	}
1500 
1501 	return QDF_STATUS_SUCCESS;
1502 }
1503 
1504 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1505 
1506 /**
1507  * dp_ipa_cleanup() - Disconnect IPA pipes
1508  * @tx_pipe_handle: Tx pipe handle
1509  * @rx_pipe_handle: Rx pipe handle
1510  *
1511  * Return: QDF_STATUS
1512  */
1513 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1514 {
1515 	int ret;
1516 
1517 	ret = qdf_ipa_wdi_disconn_pipes();
1518 	if (ret) {
1519 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1520 		       ret);
1521 		return QDF_STATUS_E_FAILURE;
1522 	}
1523 
1524 	return QDF_STATUS_SUCCESS;
1525 }
1526 
1527 /**
1528  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1529  * @ifname: Interface name
1530  * @is_ipv6_enabled: Is IPV6 enabled or not
1531  *
1532  * Return: QDF_STATUS
1533  */
1534 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1535 {
1536 	int ret;
1537 
1538 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1539 	if (ret) {
1540 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1541 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
1542 			  __func__, ret);
1543 		return QDF_STATUS_E_FAILURE;
1544 	}
1545 
1546 	return QDF_STATUS_SUCCESS;
1547 }
1548 
1549 /**
1550  * dp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
1551  * @ppdev - handle to the device instance
1552  *
1553  * Return: QDF_STATUS
1554  */
1555 QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
1556 {
1557 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1558 	struct dp_soc *soc = pdev->soc;
1559 	QDF_STATUS result;
1560 
1561 	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
1562 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
1563 
1564 	result = qdf_ipa_wdi_enable_pipes();
1565 	if (result) {
1566 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1567 			  "%s: Enable WDI PIPE fail, code %d",
1568 			  __func__, result);
1569 		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1570 		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1571 		return QDF_STATUS_E_FAILURE;
1572 	}
1573 
1574 	return QDF_STATUS_SUCCESS;
1575 }
1576 
1577 /**
1578  * dp_ipa_uc_disable_pipes() – Suspend traffic and disable Tx/Rx pipes
1579  * @ppdev - handle to the device instance
1580  *
1581  * Return: QDF_STATUS
1582  */
1583 QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
1584 {
1585 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
1586 	struct dp_soc *soc = pdev->soc;
1587 	QDF_STATUS result;
1588 
1589 	result = qdf_ipa_wdi_disable_pipes();
1590 	if (result)
1591 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1592 			  "%s: Disable WDI PIPE fail, code %d",
1593 			  __func__, result);
1594 
1595 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1596 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1597 
1598 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
1599 }
1600 
1601 /**
1602  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1603  * @client: Client type
1604  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1605  *
1606  * Return: QDF_STATUS
1607  */
1608 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1609 {
1610 	qdf_ipa_wdi_perf_profile_t profile;
1611 	QDF_STATUS result;
1612 
1613 	profile.client = client;
1614 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1615 
1616 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1617 	if (result) {
1618 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1619 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
1620 			  __func__, result);
1621 		return QDF_STATUS_E_FAILURE;
1622 	}
1623 
1624 	return QDF_STATUS_SUCCESS;
1625 }
1626 
1627 /**
1628  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
1629  * @pdev: pdev
1630  * @vdev: vdev
1631  * @nbuf: skb
1632  *
1633  * Return: nbuf if TX fails and NULL if TX succeeds
1634  */
1635 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1636 				       struct dp_vdev *vdev,
1637 				       qdf_nbuf_t nbuf)
1638 {
1639 	struct dp_peer *vdev_peer;
1640 	uint16_t len;
1641 
1642 	vdev_peer = vdev->vap_bss_peer;
1643 	if (qdf_unlikely(!vdev_peer))
1644 		return nbuf;
1645 
1646 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1647 	len = qdf_nbuf_len(nbuf);
1648 
1649 	if (dp_tx_send(dp_vdev_to_cdp_vdev(vdev), nbuf)) {
1650 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1651 		return nbuf;
1652 	}
1653 
1654 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1655 	return NULL;
1656 }
1657 
/**
 * dp_ipa_rx_intrabss_fwd() - Forward an IPA-delivered Rx frame within the BSS
 * @pvdev: vdev the frame was received on
 * @nbuf: skb handed up by IPA
 * @fwd_success: set true when the intra-BSS Tx succeeded
 *
 * Return: true if @nbuf was consumed here (forwarded or freed) and must not
 *	   be passed up to the network stack; false if the caller still owns
 *	   @nbuf and should deliver it to the stack
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_vdev *pvdev, qdf_nbuf_t nbuf,
			    bool *fwd_success)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	uint8_t local_id;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		return false;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		return false;

	if (da_is_bcmc) {
		/* Broadcast/multicast: Tx a copy into the BSS and let the
		 * original continue up to the stack
		 */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		return false;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frame addressed to this vdev's own MAC: deliver to the stack */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		return false;

	/* Forward only when both DA and SA are peers of this same vdev */
	da_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_dest,
				       &local_id);
	if (!da_peer)
		return false;

	if (da_peer->vdev != vdev)
		return false;

	sa_peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, eh->h_source,
				       &local_id);
	if (!sa_peer)
		return false;

	if (sa_peer->vdev != vdev)
		return false;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	return true;
}
1739 
#ifdef MDM_PLATFORM
/**
 * dp_ipa_is_mdm_platform() - Report whether this build targets an MDM
 *			      platform
 *
 * Return: true on MDM_PLATFORM builds, false otherwise
 */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
/* Non-MDM build variant of dp_ipa_is_mdm_platform() */
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
1751 
1752 /**
1753  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
1754  * @soc: soc
1755  * @nbuf: skb
1756  *
1757  * Return: nbuf if success and otherwise NULL
1758  */
1759 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
1760 {
1761 	uint8_t *rx_pkt_tlvs;
1762 
1763 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1764 		return nbuf;
1765 
1766 	/* WLAN IPA is run-time disabled */
1767 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
1768 		return nbuf;
1769 
1770 	/* Linearize the skb since IPA assumes linear buffer */
1771 	if (qdf_likely(qdf_nbuf_is_frag(nbuf))) {
1772 		if (qdf_nbuf_linearize(nbuf)) {
1773 			dp_err_rl("nbuf linearize failed");
1774 			return NULL;
1775 		}
1776 	}
1777 
1778 	rx_pkt_tlvs = qdf_mem_malloc(RX_PKT_TLVS_LEN);
1779 	if (!rx_pkt_tlvs) {
1780 		dp_err_rl("rx_pkt_tlvs alloc failed");
1781 		return NULL;
1782 	}
1783 
1784 	qdf_mem_copy(rx_pkt_tlvs, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);
1785 
1786 	/* Pad L3_HEADER_PADDING before ethhdr and after rx_pkt_tlvs */
1787 	qdf_nbuf_push_head(nbuf, L3_HEADER_PADDING);
1788 
1789 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_pkt_tlvs, RX_PKT_TLVS_LEN);
1790 
1791 	/* L3_HEADDING_PADDING is not accounted for real skb length */
1792 	qdf_nbuf_set_len(nbuf, qdf_nbuf_len(nbuf) - L3_HEADER_PADDING);
1793 
1794 	qdf_mem_free(rx_pkt_tlvs);
1795 
1796 	return nbuf;
1797 }
1798 
1799 #endif
1800