xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hif.h>
25 #include <htt.h>
26 #include <wdi_event.h>
27 #include <queue.h>
28 #include "dp_types.h"
29 #include "dp_htt.h"
30 #include "dp_tx.h"
31 #include "dp_rx.h"
32 #include "dp_ipa.h"
33 
34 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
35 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
36 
37 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
38  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
39  * This causes back pressure, resulting in a FW crash.
40  * By leaving some entries with no buffer attached, WBM will be able to write
41  * to the ring, and from dumps we can figure out the buffer which is causing
42  * this issue.
43  */
44 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: log timestamp (qdf_get_log_timestamp) when the remap was done
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
57 
58 #define REO_REMAP_HISTORY_SIZE 32
59 
60 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
61 
62 static qdf_atomic_t dp_ipa_reo_remap_history_index;
/*
 * dp_ipa_reo_remap_record_index_next() - claim the next history slot
 * @index: shared atomic running counter for dp_ipa_reo_remap_history
 *
 * Atomically bumps @index, rewinding the counter once it reaches
 * REO_REMAP_HISTORY_SIZE so it never grows without bound. The return
 * value is reduced modulo the history size and is therefore always a
 * valid slot in [0, REO_REMAP_HISTORY_SIZE).
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	/* Only the increment that lands exactly on the boundary rewinds
	 * the counter; any caller that raced past it still derives a
	 * correct slot from the modulo below.
	 */
	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
72 
73 /**
74  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
75  * @ix0_val: reo destination ring IX0 value
76  * @ix2_val: reo destination ring IX2 value
77  * @ix3_val: reo destination ring IX3 value
78  *
79  * Return: None
80  */
81 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
82 					 uint32_t ix3_val)
83 {
84 	int idx = dp_ipa_reo_remap_record_index_next(
85 				&dp_ipa_reo_remap_history_index);
86 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
87 
88 	record->timestamp = qdf_get_log_timestamp();
89 	record->ix0_reg = ix0_val;
90 	record->ix2_reg = ix2_val;
91 	record->ix3_reg = ix3_val;
92 }
93 
94 static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
95 						   qdf_nbuf_t nbuf,
96 						   uint32_t size,
97 						   bool create)
98 {
99 	qdf_mem_info_t mem_map_table = {0};
100 
101 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
102 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
103 				 size);
104 
105 	if (create)
106 		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
107 	else
108 		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
109 
110 	return QDF_STATUS_SUCCESS;
111 }
112 
/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - IPA SMMU map/unmap a single Rx nbuf
 * @soc: data path SoC handle
 * @nbuf: Rx network buffer to map or unmap
 * @size: number of bytes to map
 * @create: true to create the IPA SMMU mapping, false to release it
 *
 * Silently succeeds (without touching the mapping) when any pdev has
 * monitor mode configured, when IPA is disabled in config, or when SMMU
 * stage-1 translation is not enabled. Duplicate map/unmap requests are
 * counted in stats and rejected.
 *
 * Return: QDF_STATUS_SUCCESS on success or skip,
 *	   QDF_STATUS_E_INVAL on a duplicated map/unmap request
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create)
{
	struct dp_pdev *pdev;
	int i;

	/* Skip entirely while any pdev is in monitor mode */
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && pdev->monitor_configured)
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if ipa pipes is disabled, but if it's unmap
	 * operation and nbuf has done ipa smmu map before,
	 * do ipa smmu unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	/* nbuf is already in the requested state: duplicate request */
	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	/* Record the new state on the nbuf before changing the mapping */
	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
}
157 
158 #ifdef RX_DESC_MULTI_PAGE_ALLOC
159 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
160 							 struct dp_pdev *pdev,
161 							 bool create)
162 {
163 	struct rx_desc_pool *rx_pool;
164 	uint8_t pdev_id;
165 	uint32_t num_desc, page_id, offset, i;
166 	uint16_t num_desc_per_page;
167 	union dp_rx_desc_list_elem_t *rx_desc_elem;
168 	struct dp_rx_desc *rx_desc;
169 	qdf_nbuf_t nbuf;
170 
171 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
172 		return QDF_STATUS_SUCCESS;
173 
174 	pdev_id = pdev->pdev_id;
175 	rx_pool = &soc->rx_desc_buf[pdev_id];
176 
177 	qdf_spin_lock_bh(&rx_pool->lock);
178 	num_desc = rx_pool->pool_size;
179 	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
180 	for (i = 0; i < num_desc; i++) {
181 		page_id = i / num_desc_per_page;
182 		offset = i % num_desc_per_page;
183 		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
184 			break;
185 		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
186 		rx_desc = &rx_desc_elem->rx_desc;
187 		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
188 			continue;
189 		nbuf = rx_desc->nbuf;
190 
191 		if (qdf_unlikely(create ==
192 				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
193 			if (create) {
194 				DP_STATS_INC(soc,
195 					     rx.err.ipa_smmu_map_dup, 1);
196 			} else {
197 				DP_STATS_INC(soc,
198 					     rx.err.ipa_smmu_unmap_dup, 1);
199 			}
200 			continue;
201 		}
202 		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
203 
204 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
205 						 rx_pool->buf_size, create);
206 	}
207 	qdf_spin_unlock_bh(&rx_pool->lock);
208 
209 	return QDF_STATUS_SUCCESS;
210 }
211 #else
212 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
213 							 struct dp_pdev *pdev,
214 							 bool create)
215 {
216 	struct rx_desc_pool *rx_pool;
217 	uint8_t pdev_id;
218 	qdf_nbuf_t nbuf;
219 	int i;
220 
221 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
222 		return QDF_STATUS_SUCCESS;
223 
224 	pdev_id = pdev->pdev_id;
225 	rx_pool = &soc->rx_desc_buf[pdev_id];
226 
227 	qdf_spin_lock_bh(&rx_pool->lock);
228 	for (i = 0; i < rx_pool->pool_size; i++) {
229 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
230 		    rx_pool->array[i].rx_desc.unmapped)
231 			continue;
232 
233 		nbuf = rx_pool->array[i].rx_desc.nbuf;
234 
235 		if (qdf_unlikely(create ==
236 				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
237 			if (create) {
238 				DP_STATS_INC(soc,
239 					     rx.err.ipa_smmu_map_dup, 1);
240 			} else {
241 				DP_STATS_INC(soc,
242 					     rx.err.ipa_smmu_unmap_dup, 1);
243 			}
244 			continue;
245 		}
246 		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
247 
248 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
249 						 rx_pool->buf_size, create);
250 	}
251 	qdf_spin_unlock_bh(&rx_pool->lock);
252 
253 	return QDF_STATUS_SUCCESS;
254 }
255 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
256 
/**
 * dp_tx_ipa_uc_detach - Free autonomy TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Frees every TX buffer previously posted to the WBM SRNG, then the
 * buffer bookkeeping array, the TX completion doorbell mapping and the
 * TX ring scatter-gather tables.
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;
	qdf_nbuf_t nbuf;
	struct dp_ipa_resources *ipa_res;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
		if (!nbuf)
			continue;

		/* Release the IPA SMMU mapping before the DMA unmap; the
		 * size expression mirrors the one used at map time in
		 * dp_tx_ipa_uc_attach().
		 */
		if (qdf_mem_smmu_s1_enabled(soc->osdev))
			__dp_ipa_handle_buf_smmu_mapping(
					soc, nbuf,
					skb_end_pointer(nbuf) - nbuf->data,
					false);

		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(nbuf);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
						(void *)NULL;
	}

	/* Free the pointer array itself once all buffers are gone */
	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;

	ipa_res = &pdev->ipa_resource;
	/* Undo the ioremap done in dp_ipa_set_doorbell_paddr() */
	iounmap(ipa_res->tx_comp_doorbell_vaddr);

	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
}
299 
300 /**
301  * dp_rx_ipa_uc_detach - free autonomy RX resources
302  * @soc: data path instance
303  * @pdev: core txrx pdev context
304  *
305  * This function will detach DP RX into main device context
306  * will free DP Rx resources.
307  *
308  * Return: none
309  */
310 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
311 {
312 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
313 
314 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
315 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
316 }
317 
318 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
319 {
320 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
321 		return QDF_STATUS_SUCCESS;
322 
323 	/* TX resource detach */
324 	dp_tx_ipa_uc_detach(soc, pdev);
325 
326 	/* RX resource detach */
327 	dp_rx_ipa_uc_detach(soc, pdev);
328 
329 	return QDF_STATUS_SUCCESS;	/* success */
330 }
331 
/**
 * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
 * @soc: data path instance
 * @pdev: Physical device handle
 *
 * Allocate TX buffer from non-cacheable memory.
 * Attach allocated TX buffers with WBM SRNG: the WBM2IPA completion
 * ring is pre-populated with buffer addresses (simulating the H/W
 * source-ring fill) and the head pointer is advanced accordingly.
 *
 * Return: 0 on success, -EINVAL for a bad ring size, -ENOMEM when no
 *	   buffer could be allocated
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Keep DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES slots empty so
	 * WBM can always write even under back pressure (see WAR note at
	 * the macro definition).
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pointer array sized for the full ring; only the filled prefix
	 * is used (alloc_tx_buf_cnt entries).
	 */
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		/* NOTE(review): the QDF_STATUS returned by the map is not
		 * checked; a mapping failure would post a stale paddr to
		 * the ring. Worth verifying/fixing with the HAL owners.
		 */
		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

		/* Split the DMA address into the LO/HI register fields;
		 * the HI mask keeps bits 32-36 only.
		 */
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
				      HAL_WBM_SW0_BM_ID));

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;

		if (qdf_mem_smmu_s1_enabled(soc->osdev))
			__dp_ipa_handle_buf_smmu_mapping(
					soc, nbuf,
					skb_end_pointer(nbuf) - nbuf->data,
					true);
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
451 
452 /**
453  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
454  * @soc: data path instance
455  * @pdev: core txrx pdev context
456  *
457  * This function will attach a DP RX instance into the main
458  * device (SOC) context.
459  *
460  * Return: QDF_STATUS_SUCCESS: success
461  *         QDF_STATUS_E_RESOURCES: Error return
462  */
463 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
464 {
465 	return QDF_STATUS_SUCCESS;
466 }
467 
468 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
469 {
470 	int error;
471 
472 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
473 		return QDF_STATUS_SUCCESS;
474 
475 	/* TX resource attach */
476 	error = dp_tx_ipa_uc_attach(soc, pdev);
477 	if (error) {
478 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
479 			  "%s: DP IPA UC TX attach fail code %d",
480 			  __func__, error);
481 		return error;
482 	}
483 
484 	/* RX resource attach */
485 	error = dp_rx_ipa_uc_attach(soc, pdev);
486 	if (error) {
487 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
488 			  "%s: DP IPA UC RX attach fail code %d",
489 			  __func__, error);
490 		dp_tx_ipa_uc_detach(soc, pdev);
491 		return error;
492 	}
493 
494 	return QDF_STATUS_SUCCESS;	/* success */
495 }
496 
/**
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: core txrx pdev context
 *
 * Caches base address, size and head/tail pointer register addresses of
 * the four SRNGs shared with IPA (TCL data, TX completion, REO
 * destination, RX refill) into soc->ipa_uc_tx_rsc / soc->ipa_uc_rx_rsc,
 * and programs the default REO destination mapping (IX0).
 *
 * Return: 0 on success (QDF_STATUS_SUCCESS when IPA is disabled)
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* << 2 converts the entry-count product to bytes; presumably
	 * entry_size is in 4-byte words — confirm against hal_srng_params.
	 */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* Destination ring: IPA consumes, so expose the TP register */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* IPA RX refill buffer ring (second refill ring of the pdev) */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	return 0;
}
644 
645 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
646 					     qdf_shared_mem_t *shared_mem,
647 					     void *cpu_addr,
648 					     qdf_dma_addr_t dma_addr,
649 					     uint32_t size)
650 {
651 	qdf_dma_addr_t paddr;
652 	int ret;
653 
654 	shared_mem->vaddr = cpu_addr;
655 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
656 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
657 
658 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
659 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
660 
661 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
662 				      shared_mem->vaddr, dma_addr, size);
663 	if (ret) {
664 		dp_err("Unable to get DMA sgtable");
665 		return QDF_STATUS_E_NOMEM;
666 	}
667 
668 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
669 
670 	return QDF_STATUS_SUCCESS;
671 }
672 
673 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
674 {
675 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
676 	struct dp_pdev *pdev =
677 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
678 	struct dp_ipa_resources *ipa_res;
679 
680 	if (!pdev) {
681 		dp_err("%s invalid instance", __func__);
682 		return QDF_STATUS_E_FAILURE;
683 	}
684 
685 	ipa_res = &pdev->ipa_resource;
686 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
687 		return QDF_STATUS_SUCCESS;
688 
689 	ipa_res->tx_num_alloc_buffer =
690 		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
691 
692 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
693 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
694 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
695 				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
696 
697 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
698 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
699 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
700 				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
701 
702 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
703 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
704 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
705 				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
706 
707 	dp_ipa_get_shared_mem_info(
708 			soc->osdev, &ipa_res->rx_refill_ring,
709 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
710 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
711 			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
712 
713 	if (!qdf_mem_get_dma_addr(soc->osdev,
714 				  &ipa_res->tx_comp_ring.mem_info) ||
715 	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info))
716 		return QDF_STATUS_E_FAILURE;
717 
718 	return QDF_STATUS_SUCCESS;
719 }
720 
721 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
722 {
723 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
724 	struct dp_pdev *pdev =
725 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
726 	struct dp_ipa_resources *ipa_res;
727 	struct hal_srng *wbm_srng = (struct hal_srng *)
728 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
729 	struct hal_srng *reo_srng = (struct hal_srng *)
730 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
731 	uint32_t tx_comp_doorbell_dmaaddr;
732 	uint32_t rx_ready_doorbell_dmaaddr;
733 
734 	if (!pdev) {
735 		dp_err("%s invalid instance", __func__);
736 		return QDF_STATUS_E_FAILURE;
737 	}
738 
739 	ipa_res = &pdev->ipa_resource;
740 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
741 		return QDF_STATUS_SUCCESS;
742 
743 	ipa_res->tx_comp_doorbell_vaddr =
744 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
745 
746 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
747 		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
748 			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
749 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
750 
751 		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
752 			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
753 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
754 	}
755 
756 	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);
757 
758 	dp_info("paddr %pK vaddr %pK",
759 		(void *)ipa_res->tx_comp_doorbell_paddr,
760 		(void *)ipa_res->tx_comp_doorbell_vaddr);
761 
762 	/*
763 	 * For RX, REO module on Napier/Hastings does reordering on incoming
764 	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
765 	 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell
766 	 * to IPA.
767 	 * Set the doorbell addr for the REO ring.
768 	 */
769 	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
770 	return QDF_STATUS_SUCCESS;
771 }
772 
773 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
774 			      uint8_t *op_msg)
775 {
776 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
777 	struct dp_pdev *pdev =
778 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
779 
780 	if (!pdev) {
781 		dp_err("%s invalid instance", __func__);
782 		return QDF_STATUS_E_FAILURE;
783 	}
784 
785 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
786 		return QDF_STATUS_SUCCESS;
787 
788 	if (pdev->ipa_uc_op_cb) {
789 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
790 	} else {
791 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
792 		    "%s: IPA callback function is not registered", __func__);
793 		qdf_mem_free(op_msg);
794 		return QDF_STATUS_E_FAILURE;
795 	}
796 
797 	return QDF_STATUS_SUCCESS;
798 }
799 
800 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
801 				 ipa_uc_op_cb_type op_cb,
802 				 void *usr_ctxt)
803 {
804 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
805 	struct dp_pdev *pdev =
806 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
807 
808 	if (!pdev) {
809 		dp_err("%s invalid instance", __func__);
810 		return QDF_STATUS_E_FAILURE;
811 	}
812 
813 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
814 		return QDF_STATUS_SUCCESS;
815 
816 	pdev->ipa_uc_op_cb = op_cb;
817 	pdev->usr_ctxt = usr_ctxt;
818 
819 	return QDF_STATUS_SUCCESS;
820 }
821 
822 QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
823 {
824 	/* TBD */
825 	return QDF_STATUS_SUCCESS;
826 }
827 
828 /**
829  * dp_tx_send_ipa_data_frame() - send IPA data frame
830  * @soc_hdl: datapath soc handle
831  * @vdev_id: id of the virtual device
832  * @skb: skb to transmit
833  *
834  * Return: skb/ NULL is for success
835  */
836 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
837 				     qdf_nbuf_t skb)
838 {
839 	qdf_nbuf_t ret;
840 
841 	/* Terminate the (single-element) list of tx frames */
842 	qdf_nbuf_set_next(skb, NULL);
843 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
844 	if (ret) {
845 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
846 			  "%s: Failed to tx", __func__);
847 		return ret;
848 	}
849 
850 	return NULL;
851 }
852 
/**
 * dp_ipa_enable_autonomy() - remap REO destinations to the REO2IPA ring
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 *
 * Programs REO destination control IX0 so destinations 1-4 point at SW4
 * (the REO2IPA ring); when Rx hash steering is enabled, IX2 and IX3 are
 * also pointed entirely at SW4. Every programmed value is recorded in
 * the remap history for debugging.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE for an
 *	   invalid pdev, QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Registers may only be written once the target is up */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);

		/* The same all-SW4 value is written into both IX2 and IX3 */
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
903 
/**
 * dp_ipa_disable_autonomy() - restore default REO destination remapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 *
 * Undoes dp_ipa_enable_autonomy(): IX0 goes back to the SW1/SW2/SW3
 * spread (destination 4 to SW2) and, when Rx hash steering is enabled,
 * IX2/IX3 are reprogrammed from dp_reo_remap_config(). Programmed
 * values are recorded in the remap history.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE for an
 *	   invalid pdev, QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Registers may only be written once the target is up */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Restore the hash-based IX2/IX3 values from config */
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
948 
/* Rx L3 header padding in bytes; should eventually come from the per-H/W
 * configuration enable status instead of being hard coded here.
 */
950 #define L3_HEADER_PADDING	2
951 
952 #ifdef CONFIG_IPA_WDI_UNIFIED_API
953 
954 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
955 static inline void dp_setup_mcc_sys_pipes(
956 		qdf_ipa_sys_connect_params_t *sys_in,
957 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
958 {
959 	/* Setup MCC sys pipe */
960 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
961 			DP_IPA_MAX_IFACE;
962 	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
963 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
964 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
965 }
966 #else
/*
 * dp_setup_mcc_sys_pipes() - stub used when QCA_LL_TX_FLOW_CONTROL_V2
 * is defined; no IPA sys pipes are set up in this configuration.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	/* Request zero sys pipes from IPA */
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
973 #endif
974 
/**
 * dp_ipa_wdi_tx_params() - Fill WDI pipe setup info for the Tx pipe
 *			    (IPA consumer), non-SMMU case
 * @soc: data path SoC handle
 * @ipa_res: pdev IPA resources holding the ring DMA info
 * @tx: IPA WDI Tx pipe setup info to populate
 * @over_gsi: true selects the WLAN2 consumer client (GSI), else WLAN1
 *
 * Transfer ring: WBM ring, doorbell = WBM tail pointer.
 * Event ring: TCL ring, doorbell = TCL head pointer.
 * Also preprograms the TCL descriptor template used for each Tx packet.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/*
	 * Preprogram TCL descriptor: TLV header is written at the start of
	 * the template; the TCL data command presumably begins one template
	 * word in (template + 1) — matches the SMMU variant below.
	 */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
1031 
1032 static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
1033 				 struct dp_ipa_resources *ipa_res,
1034 				 qdf_ipa_wdi_pipe_setup_info_t *rx,
1035 				 bool over_gsi)
1036 {
1037 	if (over_gsi)
1038 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
1039 					IPA_CLIENT_WLAN2_PROD;
1040 	else
1041 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
1042 					IPA_CLIENT_WLAN1_PROD;
1043 
1044 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1045 		qdf_mem_get_dma_addr(soc->osdev,
1046 				     &ipa_res->rx_rdy_ring.mem_info);
1047 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1048 		qdf_mem_get_dma_size(soc->osdev,
1049 				     &ipa_res->rx_rdy_ring.mem_info);
1050 
1051 	/* REO Tail Pointer Address */
1052 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1053 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1054 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;
1055 
1056 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1057 		qdf_mem_get_dma_addr(soc->osdev,
1058 				     &ipa_res->rx_refill_ring.mem_info);
1059 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1060 		qdf_mem_get_dma_size(soc->osdev,
1061 				     &ipa_res->rx_refill_ring.mem_info);
1062 
1063 	/* FW Head Pointer Address */
1064 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1065 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1066 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;
1067 
1068 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
1069 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1070 }
1071 
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill WDI pipe setup info for the Tx
 *				 pipe (IPA consumer), SMMU case
 * @soc: data path SoC handle
 * @ipa_res: pdev IPA resources holding the ring sgtables/DMA info
 * @tx_smmu: IPA WDI SMMU Tx pipe setup info to populate
 * @over_gsi: true selects the WLAN2 consumer client (GSI), else WLAN1
 *
 * Same layout as dp_ipa_wdi_tx_params() except ring bases are passed
 * as scatter-gather tables instead of physical addresses.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring: WBM Tx completion ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	/* Event ring: TCL ring, passed as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor — must match the non-SMMU variant */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}
1128 
1129 static void
1130 dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
1131 			  struct dp_ipa_resources *ipa_res,
1132 			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
1133 			  bool over_gsi)
1134 {
1135 	if (over_gsi)
1136 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1137 					IPA_CLIENT_WLAN2_PROD;
1138 	else
1139 		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
1140 					IPA_CLIENT_WLAN1_PROD;
1141 
1142 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
1143 		     &ipa_res->rx_rdy_ring.sgtable,
1144 		     sizeof(sgtable_t));
1145 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
1146 		qdf_mem_get_dma_size(soc->osdev,
1147 				     &ipa_res->rx_rdy_ring.mem_info);
1148 	/* REO Tail Pointer Address */
1149 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
1150 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1151 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
1152 
1153 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
1154 		     &ipa_res->rx_refill_ring.sgtable,
1155 		     sizeof(sgtable_t));
1156 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
1157 		qdf_mem_get_dma_size(soc->osdev,
1158 				     &ipa_res->rx_refill_ring.mem_info);
1159 
1160 	/* FW Head Pointer Address */
1161 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
1162 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1163 	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;
1164 
1165 	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
1166 		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
1167 }
1168 
1169 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1170 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1171 			void *ipa_wdi_meter_notifier_cb,
1172 			uint32_t ipa_desc_size, void *ipa_priv,
1173 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1174 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1175 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
1176 {
1177 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1178 	struct dp_pdev *pdev =
1179 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1180 	struct dp_ipa_resources *ipa_res;
1181 	qdf_ipa_ep_cfg_t *tx_cfg;
1182 	qdf_ipa_ep_cfg_t *rx_cfg;
1183 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
1184 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
1185 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
1186 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
1187 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1188 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1189 	int ret;
1190 
1191 	if (!pdev) {
1192 		dp_err("%s invalid instance", __func__);
1193 		return QDF_STATUS_E_FAILURE;
1194 	}
1195 
1196 	ipa_res = &pdev->ipa_resource;
1197 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1198 		return QDF_STATUS_SUCCESS;
1199 
1200 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1201 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1202 
1203 	if (is_smmu_enabled)
1204 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
1205 	else
1206 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
1207 
1208 	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);
1209 
1210 	/* TX PIPE */
1211 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1212 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
1213 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
1214 	} else {
1215 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1216 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
1217 	}
1218 
1219 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
1220 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1221 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
1222 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
1223 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
1224 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
1225 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
1226 
1227 	/**
1228 	 * Transfer Ring: WBM Ring
1229 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1230 	 * Event Ring: TCL ring
1231 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1232 	 */
1233 	if (is_smmu_enabled)
1234 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
1235 	else
1236 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
1237 
1238 	/* RX PIPE */
1239 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
1240 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
1241 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
1242 	} else {
1243 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1244 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
1245 	}
1246 
1247 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
1248 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1249 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
1250 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
1251 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
1252 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
1253 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
1254 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
1255 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
1256 
1257 	/**
1258 	 * Transfer Ring: REO Ring
1259 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1260 	 * Event Ring: FW ring
1261 	 * Event Ring Doorbell PA: FW Head Pointer Address
1262 	 */
1263 	if (is_smmu_enabled)
1264 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
1265 	else
1266 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
1267 
1268 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1269 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1270 
1271 	/* Connect WDI IPA PIPEs */
1272 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1273 
1274 	if (ret) {
1275 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1276 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1277 			  __func__, ret);
1278 		return QDF_STATUS_E_FAILURE;
1279 	}
1280 
1281 	/* IPA uC Doorbell registers */
1282 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
1283 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1284 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1285 
1286 	ipa_res->tx_comp_doorbell_paddr =
1287 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1288 	ipa_res->rx_ready_doorbell_paddr =
1289 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1290 
1291 	soc->ipa_first_tx_db_access = true;
1292 
1293 	return QDF_STATUS_SUCCESS;
1294 }
1295 
1296 /**
1297  * dp_ipa_setup_iface() - Setup IPA header and register interface
1298  * @ifname: Interface name
1299  * @mac_addr: Interface MAC address
1300  * @prod_client: IPA prod client type
1301  * @cons_client: IPA cons client type
1302  * @session_id: Session ID
1303  * @is_ipv6_enabled: Is IPV6 enabled or not
1304  *
1305  * Return: QDF_STATUS
1306  */
1307 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1308 			      qdf_ipa_client_type_t prod_client,
1309 			      qdf_ipa_client_type_t cons_client,
1310 			      uint8_t session_id, bool is_ipv6_enabled)
1311 {
1312 	qdf_ipa_wdi_reg_intf_in_params_t in;
1313 	qdf_ipa_wdi_hdr_info_t hdr_info;
1314 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1315 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1316 	int ret = -EINVAL;
1317 
1318 	dp_debug("Add Partial hdr: %s, %pM", ifname, mac_addr);
1319 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1320 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1321 
1322 	/* IPV4 header */
1323 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1324 
1325 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1326 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1327 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1328 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1329 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1330 
1331 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1332 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1333 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1334 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
1335 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1336 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1337 		htonl(session_id << 16);
1338 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1339 
1340 	/* IPV6 header */
1341 	if (is_ipv6_enabled) {
1342 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1343 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1344 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1345 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1346 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1347 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1348 	}
1349 
1350 	dp_debug("registering for session_id: %u", session_id);
1351 
1352 	ret = qdf_ipa_wdi_reg_intf(&in);
1353 
1354 	if (ret) {
1355 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1356 		    "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1357 		    __func__, ret);
1358 		return QDF_STATUS_E_FAILURE;
1359 	}
1360 
1361 	return QDF_STATUS_SUCCESS;
1362 }
1363 
1364 #else /* CONFIG_IPA_WDI_UNIFIED_API */
1365 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1366 			void *ipa_i2w_cb, void *ipa_w2i_cb,
1367 			void *ipa_wdi_meter_notifier_cb,
1368 			uint32_t ipa_desc_size, void *ipa_priv,
1369 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
1370 			uint32_t *rx_pipe_handle)
1371 {
1372 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1373 	struct dp_pdev *pdev =
1374 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1375 	struct dp_ipa_resources *ipa_res;
1376 	qdf_ipa_wdi_pipe_setup_info_t *tx;
1377 	qdf_ipa_wdi_pipe_setup_info_t *rx;
1378 	qdf_ipa_wdi_conn_in_params_t pipe_in;
1379 	qdf_ipa_wdi_conn_out_params_t pipe_out;
1380 	struct tcl_data_cmd *tcl_desc_ptr;
1381 	uint8_t *desc_addr;
1382 	uint32_t desc_size;
1383 	int ret;
1384 
1385 	if (!pdev) {
1386 		dp_err("%s invalid instance", __func__);
1387 		return QDF_STATUS_E_FAILURE;
1388 	}
1389 
1390 	ipa_res = &pdev->ipa_resource;
1391 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1392 		return QDF_STATUS_SUCCESS;
1393 
1394 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1395 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
1396 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
1397 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
1398 
1399 	/* TX PIPE */
1400 	/**
1401 	 * Transfer Ring: WBM Ring
1402 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
1403 	 * Event Ring: TCL ring
1404 	 * Event Ring Doorbell PA: TCL Head Pointer Address
1405 	 */
1406 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
1407 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
1408 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1409 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
1410 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
1411 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
1412 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
1413 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
1414 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
1415 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
1416 		ipa_res->tx_comp_ring_base_paddr;
1417 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
1418 		ipa_res->tx_comp_ring_size;
1419 	/* WBM Tail Pointer Address */
1420 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
1421 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
1422 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
1423 		ipa_res->tx_ring_base_paddr;
1424 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
1425 	/* TCL Head Pointer Address */
1426 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
1427 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
1428 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
1429 		ipa_res->tx_num_alloc_buffer;
1430 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
1431 
1432 	/* Preprogram TCL descriptor */
1433 	desc_addr =
1434 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
1435 	desc_size = sizeof(struct tcl_data_cmd);
1436 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
1437 	tcl_desc_ptr = (struct tcl_data_cmd *)
1438 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
1439 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
1440 						HAL_RX_BUF_RBM_SW2_BM;
1441 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
1442 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
1443 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
1444 
1445 	/* RX PIPE */
1446 	/**
1447 	 * Transfer Ring: REO Ring
1448 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
1449 	 * Event Ring: FW ring
1450 	 * Event Ring Doorbell PA: FW Head Pointer Address
1451 	 */
1452 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
1453 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
1454 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
1455 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
1456 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
1457 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
1458 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
1459 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
1460 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
1461 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
1462 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
1463 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
1464 						ipa_res->rx_rdy_ring_base_paddr;
1465 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
1466 						ipa_res->rx_rdy_ring_size;
1467 	/* REO Tail Pointer Address */
1468 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
1469 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
1470 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
1471 					ipa_res->rx_refill_ring_base_paddr;
1472 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
1473 						ipa_res->rx_refill_ring_size;
1474 	/* FW Head Pointer Address */
1475 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
1476 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
1477 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
1478 						L3_HEADER_PADDING;
1479 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
1480 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
1481 
1482 	/* Connect WDI IPA PIPE */
1483 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
1484 	if (ret) {
1485 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1486 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
1487 			  __func__, ret);
1488 		return QDF_STATUS_E_FAILURE;
1489 	}
1490 
1491 	/* IPA uC Doorbell registers */
1492 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1493 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
1494 		  __func__,
1495 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
1496 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
1497 
1498 	ipa_res->tx_comp_doorbell_paddr =
1499 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
1500 	ipa_res->tx_comp_doorbell_vaddr =
1501 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
1502 	ipa_res->rx_ready_doorbell_paddr =
1503 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
1504 
1505 	soc->ipa_first_tx_db_access = true;
1506 
1507 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1508 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1509 		  __func__,
1510 		  "transfer_ring_base_pa",
1511 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
1512 		  "transfer_ring_size",
1513 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
1514 		  "transfer_ring_doorbell_pa",
1515 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
1516 		  "event_ring_base_pa",
1517 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
1518 		  "event_ring_size",
1519 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
1520 		  "event_ring_doorbell_pa",
1521 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
1522 		  "num_pkt_buffers",
1523 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
1524 		  "tx_comp_doorbell_paddr",
1525 		  (void *)ipa_res->tx_comp_doorbell_paddr);
1526 
1527 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1528 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1529 		  __func__,
1530 		  "transfer_ring_base_pa",
1531 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1532 		  "transfer_ring_size",
1533 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1534 		  "transfer_ring_doorbell_pa",
1535 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1536 		  "event_ring_base_pa",
1537 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1538 		  "event_ring_size",
1539 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1540 		  "event_ring_doorbell_pa",
1541 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1542 		  "num_pkt_buffers",
1543 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1544 		  "tx_comp_doorbell_paddr",
1545 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1546 
1547 	return QDF_STATUS_SUCCESS;
1548 }
1549 
1550 /**
1551  * dp_ipa_setup_iface() - Setup IPA header and register interface
1552  * @ifname: Interface name
1553  * @mac_addr: Interface MAC address
1554  * @prod_client: IPA prod client type
1555  * @cons_client: IPA cons client type
1556  * @session_id: Session ID
1557  * @is_ipv6_enabled: Is IPV6 enabled or not
1558  *
1559  * Return: QDF_STATUS
1560  */
1561 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1562 			      qdf_ipa_client_type_t prod_client,
1563 			      qdf_ipa_client_type_t cons_client,
1564 			      uint8_t session_id, bool is_ipv6_enabled)
1565 {
1566 	qdf_ipa_wdi_reg_intf_in_params_t in;
1567 	qdf_ipa_wdi_hdr_info_t hdr_info;
1568 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1569 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1570 	int ret = -EINVAL;
1571 
1572 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1573 		  "%s: Add Partial hdr: %s, %pM",
1574 		  __func__, ifname, mac_addr);
1575 
1576 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1577 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1578 
1579 	/* IPV4 header */
1580 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1581 
1582 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1583 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1584 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1585 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1586 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1587 
1588 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1589 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1590 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1591 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1592 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1593 		htonl(session_id << 16);
1594 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1595 
1596 	/* IPV6 header */
1597 	if (is_ipv6_enabled) {
1598 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1599 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1600 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1601 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1602 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1603 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1604 	}
1605 
1606 	ret = qdf_ipa_wdi_reg_intf(&in);
1607 	if (ret) {
1608 		dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
1609 		       ret);
1610 		return QDF_STATUS_E_FAILURE;
1611 	}
1612 
1613 	return QDF_STATUS_SUCCESS;
1614 }
1615 
1616 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1617 
1618 /**
1619  * dp_ipa_cleanup() - Disconnect IPA pipes
1620  * @tx_pipe_handle: Tx pipe handle
1621  * @rx_pipe_handle: Rx pipe handle
1622  *
1623  * Return: QDF_STATUS
1624  */
1625 QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1626 {
1627 	int ret;
1628 
1629 	ret = qdf_ipa_wdi_disconn_pipes();
1630 	if (ret) {
1631 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1632 		       ret);
1633 		return QDF_STATUS_E_FAILURE;
1634 	}
1635 
1636 	return QDF_STATUS_SUCCESS;
1637 }
1638 
1639 /**
1640  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1641  * @ifname: Interface name
1642  * @is_ipv6_enabled: Is IPV6 enabled or not
1643  *
1644  * Return: QDF_STATUS
1645  */
1646 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1647 {
1648 	int ret;
1649 
1650 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1651 	if (ret) {
1652 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1653 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
1654 			  __func__, ret);
1655 		return QDF_STATUS_E_FAILURE;
1656 	}
1657 
1658 	return QDF_STATUS_SUCCESS;
1659 }
1660 
/**
 * dp_ipa_enable_pipes() - Enable the connected IPA Tx/Rx pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct dp_ipa_resources *ipa_res;
	QDF_STATUS result;

	if (!pdev) {
		dp_err("%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;

	/*
	 * Set the enabled flag and create the Rx buffer pool SMMU
	 * mappings BEFORE enabling the pipes; the order is deliberate.
	 */
	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);

	result = qdf_ipa_wdi_enable_pipes();
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		/* Roll back the flag and the SMMU mappings on failure */
		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
		return QDF_STATUS_E_FAILURE;
	}

	if (soc->ipa_first_tx_db_access) {
		/*
		 * First enable after pipe connect: initialize the WBM
		 * ring head pointer via the IPA Tx-comp doorbell vaddr.
		 */
		hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr);
		soc->ipa_first_tx_db_access = false;
	}

	return QDF_STATUS_SUCCESS;
}
1698 
1699 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1700 {
1701 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1702 	struct dp_pdev *pdev =
1703 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1704 	QDF_STATUS result;
1705 
1706 	if (!pdev) {
1707 		dp_err("%s invalid instance", __func__);
1708 		return QDF_STATUS_E_FAILURE;
1709 	}
1710 
1711 	result = qdf_ipa_wdi_disable_pipes();
1712 	if (result) {
1713 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1714 			  "%s: Disable WDI PIPE fail, code %d",
1715 			  __func__, result);
1716 		qdf_assert_always(0);
1717 		return QDF_STATUS_E_FAILURE;
1718 	}
1719 
1720 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1721 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1722 
1723 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
1724 }
1725 
1726 /**
1727  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1728  * @client: Client type
1729  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1730  *
1731  * Return: QDF_STATUS
1732  */
1733 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1734 {
1735 	qdf_ipa_wdi_perf_profile_t profile;
1736 	QDF_STATUS result;
1737 
1738 	profile.client = client;
1739 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1740 
1741 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1742 	if (result) {
1743 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1744 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
1745 			  __func__, result);
1746 		return QDF_STATUS_E_FAILURE;
1747 	}
1748 
1749 	return QDF_STATUS_SUCCESS;
1750 }
1751 
1752 /**
1753  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
1754  * @pdev: pdev
1755  * @vdev: vdev
1756  * @nbuf: skb
1757  *
1758  * Return: nbuf if TX fails and NULL if TX succeeds
1759  */
1760 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1761 				       struct dp_vdev *vdev,
1762 				       qdf_nbuf_t nbuf)
1763 {
1764 	struct dp_peer *vdev_peer;
1765 	uint16_t len;
1766 
1767 	vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev);
1768 	if (qdf_unlikely(!vdev_peer))
1769 		return nbuf;
1770 
1771 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1772 	len = qdf_nbuf_len(nbuf);
1773 
1774 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
1775 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1776 		dp_peer_unref_delete(vdev_peer);
1777 		return nbuf;
1778 	}
1779 
1780 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1781 	dp_peer_unref_delete(vdev_peer);
1782 	return NULL;
1783 }
1784 
/**
 * dp_ipa_rx_intrabss_fwd() - Attempt intra-BSS forwarding of an RX frame
 *			      delivered by the IPA driver
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the frame was received on
 * @nbuf: RX skb (allocated by the IPA driver, see tracking note below)
 * @fwd_success: out param, set to true only if the TX send succeeded
 *
 * Return: true if @nbuf was consumed here (forwarded or freed) and must NOT
 *	   be passed up to the network stack; false if the caller should
 *	   deliver the original @nbuf to the stack.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		return false;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		return false;

	if (da_is_bcmc) {
		/* Broadcast/multicast: forward a copy within the BSS and
		 * still hand the original frame up to the stack.
		 */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		return false;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frame addressed to the vdev itself: let the stack consume it */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		return false;

	/* Unicast: forward only when both DA and SA are known peers on this
	 * vdev. NOTE(review): these lookups are not paired with a peer unref
	 * in this function - confirm dp_find_peer_by_addr_and_vdev() does not
	 * take a reference on the returned peer.
	 */
	da_peer = dp_find_peer_by_addr_and_vdev(dp_pdev_to_cdp_pdev(pdev),
						dp_vdev_to_cdp_vdev(vdev),
						eh->h_dest);

	if (!da_peer)
		return false;

	sa_peer = dp_find_peer_by_addr_and_vdev(dp_pdev_to_cdp_pdev(pdev),
						dp_vdev_to_cdp_vdev(vdev),
						eh->h_source);

	if (!sa_peer)
		return false;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	/* On TX failure the frame is dropped here, not returned to the
	 * stack - we still report the nbuf as consumed (return true).
	 */
	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	return true;
}
1865 
/**
 * dp_ipa_is_mdm_platform() - Report whether this build targets an MDM
 *			      platform
 *
 * Compile-time decision: true only when built with MDM_PLATFORM defined.
 *
 * Return: true on MDM builds, false otherwise
 */
bool dp_ipa_is_mdm_platform(void)
{
#ifdef MDM_PLATFORM
	return true;
#else
	return false;
#endif
}
1877 
1878 /**
1879  * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
1880  * @soc: soc
1881  * @nbuf: source skb
1882  *
1883  * Return: new nbuf if success and otherwise NULL
1884  */
1885 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
1886 					     qdf_nbuf_t nbuf)
1887 {
1888 	uint8_t *src_nbuf_data;
1889 	uint8_t *dst_nbuf_data;
1890 	qdf_nbuf_t dst_nbuf;
1891 	qdf_nbuf_t temp_nbuf = nbuf;
1892 	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
1893 	bool is_nbuf_head = true;
1894 	uint32_t copy_len = 0;
1895 
1896 	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
1897 				  RX_BUFFER_RESERVATION,
1898 				  RX_DATA_BUFFER_ALIGNMENT, FALSE);
1899 
1900 	if (!dst_nbuf) {
1901 		dp_err_rl("nbuf allocate fail");
1902 		return NULL;
1903 	}
1904 
1905 	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
1906 		qdf_nbuf_free(dst_nbuf);
1907 		dp_err_rl("nbuf is jumbo data");
1908 		return NULL;
1909 	}
1910 
1911 	/* prepeare to copy all data into new skb */
1912 	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
1913 	while (temp_nbuf) {
1914 		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
1915 		/* first head nbuf */
1916 		if (is_nbuf_head) {
1917 			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
1918 				     RX_PKT_TLVS_LEN);
1919 			/* leave extra 2 bytes L3_HEADER_PADDING */
1920 			dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING);
1921 			src_nbuf_data += RX_PKT_TLVS_LEN;
1922 			copy_len = qdf_nbuf_headlen(temp_nbuf) -
1923 						RX_PKT_TLVS_LEN;
1924 			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
1925 			is_nbuf_head = false;
1926 		} else {
1927 			copy_len = qdf_nbuf_len(temp_nbuf);
1928 			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
1929 		}
1930 		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
1931 		dst_nbuf_data += copy_len;
1932 	}
1933 
1934 	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
1935 	/* copy is done, free original nbuf */
1936 	qdf_nbuf_free(nbuf);
1937 
1938 	return dst_nbuf;
1939 }
1940 
1941 /**
1942  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
1943  * @soc: soc
1944  * @nbuf: skb
1945  *
1946  * Return: nbuf if success and otherwise NULL
1947  */
1948 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
1949 {
1950 
1951 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1952 		return nbuf;
1953 
1954 	/* WLAN IPA is run-time disabled */
1955 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
1956 		return nbuf;
1957 
1958 	if (!qdf_nbuf_is_frag(nbuf))
1959 		return nbuf;
1960 
1961 	/* linearize skb for IPA */
1962 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
1963 }
1964 
1965 #endif
1966