xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifdef IPA_OFFLOAD
19 
20 #include <wlan_ipa_ucfg_api.h>
21 #include <qdf_ipa_wdi3.h>
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hal_reo.h>
27 #include <hif.h>
28 #include <htt.h>
29 #include <wdi_event.h>
30 #include <queue.h>
31 #include "dp_types.h"
32 #include "dp_htt.h"
33 #include "dp_tx.h"
34 #include "dp_rx.h"
35 #include "dp_ipa.h"
36 #include "dp_internal.h"
37 #ifdef WIFI_MONITOR_SUPPORT
38 #include "dp_mon.h"
39 #endif
40 #ifdef FEATURE_WDS
41 #include "dp_txrx_wds.h"
42 #endif
43 
44 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
45 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
46 
47 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
48  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
49  * This causes back pressure, resulting in a FW crash.
50  * By leaving some entries with no buffer attached, WBM will be able to write
51  * to the ring, and from dumps we can figure out the buffer which is causing
52  * this issue.
53  */
54 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
55 
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: Timestamp when remap occurs
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;	/* qdf log timestamp at the time of remap */
	uint32_t ix0_reg;	/* value written to REO destination IX0 */
	uint32_t ix2_reg;	/* value written to REO destination IX2 */
	uint32_t ix3_reg;	/* value written to REO destination IX3 */
};
69 
70 #ifdef IPA_WDS_EASYMESH_FEATURE
71 #define WLAN_IPA_META_DATA_MASK htonl(0x000000FF)
72 #else
73 #define WLAN_IPA_META_DATA_MASK htonl(0x00FF0000)
74 #endif
75 
/* Number of remap records retained in the circular history buffer */
#define REO_REMAP_HISTORY_SIZE 32

/* Circular history of the most recent REO destination remap writes */
struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];

/* Shared write cursor into dp_ipa_reo_remap_history; advanced atomically
 * by dp_ipa_reo_remap_record_index_next() and rewound on wraparound.
 */
static qdf_atomic_t dp_ipa_reo_remap_history_index;
/**
 * dp_ipa_reo_remap_record_index_next() - claim the next history slot
 * @index: shared atomic write cursor for the remap history buffer
 *
 * Atomically advances the cursor and maps this call's value into
 * [0, REO_REMAP_HISTORY_SIZE) so the history array is used circularly.
 *
 * Return: slot index in [0, REO_REMAP_HISTORY_SIZE)
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	/* Rewind the shared cursor once it hits the buffer size so it
	 * does not grow without bound; the modulo below already folds
	 * this caller's value into range.
	 */
	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
90 
91 /**
92  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
93  * @ix0_val: reo destination ring IX0 value
94  * @ix2_val: reo destination ring IX2 value
95  * @ix3_val: reo destination ring IX3 value
96  *
97  * Return: None
98  */
99 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
100 					 uint32_t ix3_val)
101 {
102 	int idx = dp_ipa_reo_remap_record_index_next(
103 				&dp_ipa_reo_remap_history_index);
104 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
105 
106 	record->timestamp = qdf_get_log_timestamp();
107 	record->ix0_reg = ix0_val;
108 	record->ix2_reg = ix2_val;
109 	record->ix3_reg = ix3_val;
110 }
111 
/**
 * __dp_ipa_handle_buf_smmu_mapping() - SMMU map/unmap one nbuf for IPA
 * @soc: dp_soc handle
 * @nbuf: network buffer to map or unmap
 * @size: number of bytes to map/unmap
 * @create: true to create the SMMU mapping, false to tear it down
 * @func: caller function name, recorded for SMMU mapping debug
 * @line: caller line number, recorded for SMMU mapping debug
 *
 * Asserts (rather than returning an error) on map/unmap failure.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create,
						   const char *func,
						   uint32_t line)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	qdf_ipa_wdi_hdl_t hdl;

	/* Need to handle the case when one soc will
	 * have multiple pdev(radio's), Currently passing
	 * pdev_id as 0 assuming 1 soc has only 1 radio.
	 */
	hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0);
	if (hdl == DP_IPA_HDL_INVALID) {
		dp_err("IPA handle is invalid");
		return QDF_STATUS_E_INVAL;
	}
	/* Describe the nbuf's first fragment (pa/size) for the SMMU op */
	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);

		ret = qdf_nbuf_smmu_map_debug(nbuf, hdl, 1, &mem_map_table,
					      func, line);
	} else {
		ret = qdf_nbuf_smmu_unmap_debug(nbuf, hdl, 1, &mem_map_table,
						func, line);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
159 
/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - IPA SMMU map/unmap a single Rx nbuf
 * @soc: dp_soc handle
 * @nbuf: Rx network buffer
 * @size: number of bytes to map/unmap
 * @create: true to map, false to unmap
 * @func: caller function name, for SMMU mapping debug
 * @line: caller line number, for SMMU mapping debug
 *
 * No-op when any pdev has monitor mode configured, when IPA is disabled
 * in cfg, or when SMMU S1 is not enabled. Duplicate map/unmap requests
 * are counted in soc stats and rejected.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create, const char *func,
					     uint32_t line)
{
	struct dp_pdev *pdev;
	int i;

	/* Skip IPA SMMU handling entirely while monitor mode is active */
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && dp_monitor_is_configured(pdev))
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if ipa pipes is disabled, but if it's unmap
	 * operation and nbuf has done ipa smmu map before,
	 * do ipa smmu unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	/* create == current map state means a duplicate map or unmap */
	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create,
						func, line);
}
206 
207 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
208 	struct dp_soc *soc,
209 	struct dp_pdev *pdev,
210 	bool create,
211 	const char *func,
212 	uint32_t line)
213 {
214 	uint32_t index;
215 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
216 	uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
217 	qdf_nbuf_t nbuf;
218 	uint32_t buf_len;
219 
220 	if (!ipa_is_ready()) {
221 		dp_info("IPA is not READY");
222 		return 0;
223 	}
224 
225 	for (index = 0; index < tx_buffer_cnt; index++) {
226 		nbuf = (qdf_nbuf_t)
227 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
228 		if (!nbuf)
229 			continue;
230 		buf_len = qdf_nbuf_get_data_len(nbuf);
231 		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
232 						       create, func, line);
233 	}
234 
235 	return ret;
236 }
237 
#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/**
 * dp_ipa_set_reo_ctx_mapping_lock_required() - flag whether REO ring
 *	access contexts must take the SRNG lock during IPA SMMU remapping
 * @soc: dp_soc handle
 * @lock_required: true while an IPA Rx pool remap is in progress
 *
 * The flag for each REO destination ring is updated under that ring's
 * SRNG lock so in-flight ring accessors observe a consistent value.
 *
 * Return: None
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
	hal_ring_handle_t hal_ring_hdl;
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
		hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng;
		hal_srng_lock(hal_ring_hdl);
		soc->ipa_reo_ctx_lock_required[ring] = lock_required;
		hal_srng_unlock(hal_ring_hdl);
	}
}
#else
/* Lock-less SRNG access build: no per-ring lock flag to maintain */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
}

#endif
259 
260 #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap all in-use
 *	Rx buffers of a pdev's Rx descriptor pool (multi-page pool layout)
 * @soc: dp_soc handle
 * @pdev: dp_pdev whose Rx descriptor pool is walked
 * @create: true to map, false to unmap
 * @func: caller function name, for SMMU mapping debug
 * @line: caller line number, for SMMU mapping debug
 *
 * Walks every descriptor in the pool under the pool lock and the IPA Rx
 * SMMU mapping lock; descriptors not in use, already unmapped, or whose
 * map state already matches @create are skipped (duplicates are counted
 * in soc stats).
 *
 * Return: status of the last map/unmap attempt
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create,
							 const char *func,
							 uint32_t line)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	/* Make REO ring contexts take their SRNG lock while we remap */
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Pool pages may be freed concurrently with detach; stop */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* create == current map state means duplicate request */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						       rx_pool->buf_size,
						       create, func, line);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return ret;
}
324 #else
325 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(
326 							 struct dp_soc *soc,
327 							 struct dp_pdev *pdev,
328 							 bool create,
329 							 const char *func,
330 							 uint32_t line)
331 {
332 	struct rx_desc_pool *rx_pool;
333 	uint8_t pdev_id;
334 	qdf_nbuf_t nbuf;
335 	int i;
336 
337 	if (!qdf_ipa_is_ready())
338 		return QDF_STATUS_SUCCESS;
339 
340 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
341 		return QDF_STATUS_SUCCESS;
342 
343 	pdev_id = pdev->pdev_id;
344 	rx_pool = &soc->rx_desc_buf[pdev_id];
345 
346 	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
347 	qdf_spin_lock_bh(&rx_pool->lock);
348 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
349 	for (i = 0; i < rx_pool->pool_size; i++) {
350 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
351 		    rx_pool->array[i].rx_desc.unmapped)
352 			continue;
353 
354 		nbuf = rx_pool->array[i].rx_desc.nbuf;
355 
356 		if (qdf_unlikely(create ==
357 				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
358 			if (create) {
359 				DP_STATS_INC(soc,
360 					     rx.err.ipa_smmu_map_dup, 1);
361 			} else {
362 				DP_STATS_INC(soc,
363 					     rx.err.ipa_smmu_unmap_dup, 1);
364 			}
365 			continue;
366 		}
367 		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
368 
369 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, rx_pool->buf_size,
370 						 create, func, line);
371 	}
372 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
373 	qdf_spin_unlock_bh(&rx_pool->lock);
374 	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);
375 
376 	return QDF_STATUS_SUCCESS;
377 }
378 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
379 
380 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
381 					     qdf_shared_mem_t *shared_mem,
382 					     void *cpu_addr,
383 					     qdf_dma_addr_t dma_addr,
384 					     uint32_t size)
385 {
386 	qdf_dma_addr_t paddr;
387 	int ret;
388 
389 	shared_mem->vaddr = cpu_addr;
390 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
391 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
392 
393 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
394 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
395 
396 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
397 				      shared_mem->vaddr, dma_addr, size);
398 	if (ret) {
399 		dp_err("Unable to get DMA sgtable");
400 		return QDF_STATUS_E_NOMEM;
401 	}
402 
403 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
404 
405 	return QDF_STATUS_SUCCESS;
406 }
407 
408 /**
409  * dp_ipa_get_tx_bank_id - API to get TCL bank id
410  * @soc: dp_soc handle
411  * @bank_id: out parameter for bank id
412  *
413  * Return: QDF_STATUS
414  */
415 static QDF_STATUS dp_ipa_get_tx_bank_id(struct dp_soc *soc, uint8_t *bank_id)
416 {
417 	if (soc->arch_ops.ipa_get_bank_id) {
418 		*bank_id = soc->arch_ops.ipa_get_bank_id(soc);
419 		if (*bank_id < 0) {
420 			return QDF_STATUS_E_INVAL;
421 		} else {
422 			dp_info("bank_id %u", *bank_id);
423 			return QDF_STATUS_SUCCESS;
424 		}
425 	} else {
426 		return QDF_STATUS_E_NOSUPPORT;
427 	}
428 }
429 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
/**
 * dp_ipa_setup_tx_params_bank_id() - fill the TCL bank id into the IPA
 *	WDI Tx pipe setup info (non-SMMU variant)
 * @soc: dp_soc handle
 * @tx: IPA WDI Tx pipe setup info to update
 *
 * Leaves @tx untouched when no bank id is available.
 * NOTE(review): the macro is named ..._RX_BANK_ID although it is applied
 * to the Tx pipe info; this mirrors the field name in the IPA WDI
 * header - confirm against qdf_ipa_wdi3.h.
 *
 * Return: None
 */
static void dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc,
					   qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	uint8_t bank_id;

	if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id)))
		QDF_IPA_WDI_SETUP_INFO_RX_BANK_ID(tx, bank_id);
}

/**
 * dp_ipa_setup_tx_smmu_params_bank_id() - fill the TCL bank id into the
 *	IPA WDI Tx pipe setup info (SMMU variant)
 * @soc: dp_soc handle
 * @tx_smmu: IPA WDI SMMU Tx pipe setup info to update
 *
 * Leaves @tx_smmu untouched when no bank id is available.
 *
 * Return: None
 */
static void
dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc,
				    qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	uint8_t bank_id;

	if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id)))
		QDF_IPA_WDI_SETUP_INFO_SMMU_RX_BANK_ID(tx_smmu, bank_id);
}
#else
/* Older kernel / non-unified IPA WDI API: bank id field not available */
static inline void
dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc,
			       qdf_ipa_wdi_pipe_setup_info_t *tx)
{
}

static inline void
dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc,
				    qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
}
#endif
463 
464 #ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_ipa_tx_alt_pool_detach() - free the alternative (second) IPA Tx pipe
 *	buffer pool and its associated resources
 * @soc: dp_soc handle
 * @pdev: dp_pdev owning the IPA resources
 *
 * Unmaps and frees every nbuf in the alt Tx pool, releases the pool
 * pointer array, unmaps the alt Tx completion doorbell (when it was
 * ioremap'ed rather than DDR-mapped), and frees the alt ring sgtables.
 *
 * Return: None
 */
static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res;
	qdf_nbuf_t nbuf;
	int idx;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
		/* Slots may be empty after a partial attach */
		if (!nbuf)
			continue;

		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
		/* Keep the DP Tx skb accounting in sync with the free */
		qdf_mem_dp_tx_skb_cnt_dec();
		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
		qdf_nbuf_free(nbuf);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
						(void *)NULL;
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;

	ipa_res = &pdev->ipa_resource;
	/* Only ioremap'ed doorbells need iounmap; DDR-mapped ones do not */
	if (!ipa_res->is_db_ddr_mapped && ipa_res->tx_alt_comp_doorbell_vaddr)
		iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);

	qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
}
495 
/**
 * dp_ipa_tx_alt_pool_attach() - allocate the alternative (second) IPA Tx
 *	pipe buffer pool and pre-populate the WBM2IPA completion ring
 * @soc: dp_soc handle
 *
 * Allocates as many Tx nbufs as the ring allows (leaving
 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES entries unpopulated as a
 * WBM back-pressure WAR) and writes their addresses into the ring,
 * simulating H/W as the source and advancing HP.
 *
 * Return: QDF_STATUS_SUCCESS (0) on success, -EINVAL/-ENOMEM on failure
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t wbm_bm_id;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx,
						  IPA_TX_ALT_RING_IDX);

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave some ring entries without buffers - see the WAR comment
	 * at DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pointer array sized for the full ring; only the populated
	 * prefix (alloc_tx_buf_cnt) holds valid nbufs.
	 */
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		/* Track DP Tx skb memory for accounting/debug */
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0, wbm_bm_id);

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
602 
603 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
604 {
605 	struct dp_soc *soc = pdev->soc;
606 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
607 
608 	ipa_res->tx_alt_ring_num_alloc_buffer =
609 		(uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;
610 
611 	dp_ipa_get_shared_mem_info(
612 			soc->osdev, &ipa_res->tx_alt_ring,
613 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
614 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
615 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
616 
617 	dp_ipa_get_shared_mem_info(
618 			soc->osdev, &ipa_res->tx_alt_comp_ring,
619 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
620 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
621 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
622 
623 	if (!qdf_mem_get_dma_addr(soc->osdev,
624 				  &ipa_res->tx_alt_comp_ring.mem_info))
625 		return QDF_STATUS_E_FAILURE;
626 
627 	return QDF_STATUS_SUCCESS;
628 }
629 
/**
 * dp_ipa_tx_alt_ring_resource_setup() - capture ring base addresses,
 *	sizes and doorbell physical addresses of the alternative Tx pipe
 * @soc: dp_soc handle
 *
 * Records the TCL data ring (SW2TCL2) and Tx completion ring
 * (WBM2SW4 release) parameters into soc->ipa_uc_tx_rsc_alt, and
 * converts the shadow HP/TP register offsets into bus physical
 * addresses for IPA to ring directly.
 *
 * Return: None
 */
static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
	hal_srng = (struct hal_srng *)
		soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words, hence << 2 to get bytes */
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	/* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
	hal_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words, hence << 2 to get bytes */
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
}
701 
/**
 * dp_ipa_map_ring_doorbell_paddr() - map IPA-provided doorbell addresses
 *	for CPU access and (when SMMU S1 is on) for device DMA
 * @pdev: dp_pdev owning the IPA resources
 *
 * Produces a CPU vaddr for each doorbell (phys_to_virt when the doorbell
 * lives in DDR, ioremap otherwise) and, with SMMU S1 enabled, replaces
 * the doorbell paddr with its SMMU-mapped IOVA via pld_smmu_map().
 * Asserts on SMMU map failure.
 *
 * Return: None
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* From here on the stored "paddr" is the SMMU IOVA */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
753 
/**
 * dp_ipa_unmap_ring_doorbell_paddr() - undo the SMMU doorbell mappings
 *	created by dp_ipa_map_ring_doorbell_paddr()
 * @pdev: dp_pdev owning the IPA resources
 *
 * No-op when SMMU S1 is disabled (nothing was SMMU-mapped). Asserts on
 * unmap failure.
 *
 * Return: None
 */
static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	/* Unmap must be in reverse order of map */
	if (ipa_res->tx_alt_comp_doorbell_paddr) {
		ret = pld_smmu_unmap(soc->osdev->dev,
				     ipa_res->tx_alt_comp_doorbell_paddr,
				     sizeof(uint32_t));
		qdf_assert_always(!ret);
	}

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->tx_comp_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}
781 
782 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
783 						 struct dp_pdev *pdev,
784 						 bool create, const char *func,
785 						 uint32_t line)
786 {
787 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
788 	struct ipa_dp_tx_rsc *rsc;
789 	uint32_t tx_buffer_cnt;
790 	uint32_t buf_len;
791 	qdf_nbuf_t nbuf;
792 	uint32_t index;
793 
794 	if (!ipa_is_ready()) {
795 		dp_info("IPA is not READY");
796 		return QDF_STATUS_SUCCESS;
797 	}
798 
799 	rsc = &soc->ipa_uc_tx_rsc_alt;
800 	tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
801 
802 	for (index = 0; index < tx_buffer_cnt; index++) {
803 		nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
804 		if (!nbuf)
805 			continue;
806 
807 		buf_len = qdf_nbuf_get_data_len(nbuf);
808 		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
809 						       create, func, line);
810 	}
811 
812 	return ret;
813 }
814 
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - fill IPA WDI setup info for the
 *	alternative Tx pipe (non-SMMU variant)
 * @soc: dp_soc handle
 * @ipa_res: dp ipa resources of the pdev
 * @tx: IPA WDI pipe setup info to populate
 *
 * From IPA's perspective the WBM completion ring is the transfer ring
 * and the TCL data ring is the event ring.
 *
 * Return: None
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_alt_ring_num_alloc_buffer;

	/* No header offset: packets start at the buffer base */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	dp_ipa_setup_tx_params_bank_id(soc, tx);
}
852 
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - fill IPA WDI setup info for the
 *	alternative Tx pipe (SMMU variant, rings described by sgtables)
 * @soc: dp_soc handle
 * @ipa_res: dp ipa resources of the pdev
 * @tx_smmu: IPA WDI SMMU pipe setup info to populate
 *
 * Return: None
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

	/* Rings are passed as scatter-gather tables under SMMU */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu);
}
888 
889 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
890 				     struct dp_ipa_resources *res,
891 				     qdf_ipa_wdi_conn_in_params_t *in)
892 {
893 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
894 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
895 	qdf_ipa_ep_cfg_t *tx_cfg;
896 
897 	QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;
898 
899 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
900 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
901 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
902 		dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
903 	} else {
904 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
905 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx);
906 		dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
907 	}
908 
909 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
910 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
911 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
912 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
913 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
914 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
915 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
916 }
917 
918 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
919 			       qdf_ipa_wdi_conn_out_params_t *out)
920 {
921 	res->tx_comp_doorbell_paddr =
922 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
923 	res->rx_ready_doorbell_paddr =
924 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
925 	res->tx_alt_comp_doorbell_paddr =
926 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
927 }
928 
/**
 * dp_ipa_setup_iface_session_id() - decode the packed session id and fill
 *	the IPA interface registration params
 * @in: IPA WDI interface registration in-params to update
 * @session_id: vdev session id with a band flag packed into the low bits
 *
 * NOTE(review): the low bit(s) of @session_id carry the 2 GHz flag and
 * the real session id sits above IPA_SESSION_ID_SHIFT; the mask test
 * below only isolates a single flag bit if IPA_SESSION_ID_SHIFT == 1 -
 * confirm against the definition of IPA_SESSION_ID_SHIFT.
 *
 * Return: None
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;

	session_id = session_id >> IPA_SESSION_ID_SHIFT;
	dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface);

	/* Session id is carried in the metadata (network byte order) */
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
}
940 
/**
 * dp_ipa_tx_comp_ring_init_hp() - initialize the head pointers of both
 *	IPA Tx completion rings from their doorbell vaddrs
 * @soc: dp_soc handle
 * @res: dp ipa resources holding the doorbell virtual addresses
 *
 * The alternate ring is only initialized when IPA reported a doorbell
 * for it (i.e. the second Tx pipe is in use).
 *
 * Return: None
 */
static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
					struct dp_ipa_resources *res)
{
	struct hal_srng *wbm_srng;

	/* Init first TX comp ring */
	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_comp_doorbell_vaddr);

	/* Init the alternate TX comp ring */
	if (!res->tx_alt_comp_doorbell_paddr)
		return;

	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_alt_comp_doorbell_vaddr);
}
963 
964 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
965 					 struct dp_ipa_resources *ipa_res)
966 {
967 	struct hal_srng *wbm_srng;
968 
969 	wbm_srng = (struct hal_srng *)
970 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
971 
972 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
973 					  ipa_res->tx_comp_doorbell_paddr);
974 
975 	dp_info("paddr %pK vaddr %pK",
976 		(void *)ipa_res->tx_comp_doorbell_paddr,
977 		(void *)ipa_res->tx_comp_doorbell_vaddr);
978 
979 	/* Setup for alternative TX comp ring */
980 	if (!ipa_res->tx_alt_comp_doorbell_paddr)
981 		return;
982 
983 	wbm_srng = (struct hal_srng *)
984 			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
985 
986 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
987 					  ipa_res->tx_alt_comp_doorbell_paddr);
988 
989 	dp_info("paddr %pK vaddr %pK",
990 		(void *)ipa_res->tx_alt_comp_doorbell_paddr,
991 		(void *)ipa_res->tx_alt_comp_doorbell_vaddr);
992 }
993 
994 #ifdef IPA_SET_RESET_TX_DB_PA
995 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
996 					      struct dp_ipa_resources *ipa_res)
997 {
998 	hal_ring_handle_t wbm_srng;
999 	qdf_dma_addr_t hp_addr;
1000 
1001 	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1002 	if (!wbm_srng)
1003 		return QDF_STATUS_E_FAILURE;
1004 
1005 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
1006 
1007 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
1008 
1009 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
1010 
1011 	/* Reset alternative TX comp ring */
1012 	wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
1013 	if (!wbm_srng)
1014 		return QDF_STATUS_E_FAILURE;
1015 
1016 	hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;
1017 
1018 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
1019 
1020 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
1021 
1022 	return QDF_STATUS_SUCCESS;
1023 }
1024 #endif /* IPA_SET_RESET_TX_DB_PA */
1025 
1026 #else /* !IPA_WDI3_TX_TWO_PIPES */
1027 
/* No-op: the second TX pipe pool exists only with IPA_WDI3_TX_TWO_PIPES */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
1032 
/* No-op: alternate TX ring resources exist only with IPA_WDI3_TX_TWO_PIPES */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
1036 
/* No-op stub: report success since there is no second TX pool to attach */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	return 0;
}
1041 
/* No-op stub: no alternate TX ring resources without IPA_WDI3_TX_TWO_PIPES */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1046 
1047 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
1048 {
1049 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1050 	uint32_t rx_ready_doorbell_dmaaddr;
1051 	uint32_t tx_comp_doorbell_dmaaddr;
1052 	struct dp_soc *soc = pdev->soc;
1053 	int ret = 0;
1054 
1055 	if (ipa_res->is_db_ddr_mapped)
1056 		ipa_res->tx_comp_doorbell_vaddr =
1057 				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
1058 	else
1059 		ipa_res->tx_comp_doorbell_vaddr =
1060 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
1061 
1062 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
1063 		ret = pld_smmu_map(soc->osdev->dev,
1064 				   ipa_res->tx_comp_doorbell_paddr,
1065 				   &tx_comp_doorbell_dmaaddr,
1066 				   sizeof(uint32_t));
1067 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
1068 		qdf_assert_always(!ret);
1069 
1070 		ret = pld_smmu_map(soc->osdev->dev,
1071 				   ipa_res->rx_ready_doorbell_paddr,
1072 				   &rx_ready_doorbell_dmaaddr,
1073 				   sizeof(uint32_t));
1074 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
1075 		qdf_assert_always(!ret);
1076 	}
1077 }
1078 
1079 static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
1080 {
1081 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1082 	struct dp_soc *soc = pdev->soc;
1083 	int ret = 0;
1084 
1085 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
1086 		return;
1087 
1088 	ret = pld_smmu_unmap(soc->osdev->dev,
1089 			     ipa_res->rx_ready_doorbell_paddr,
1090 			     sizeof(uint32_t));
1091 	qdf_assert_always(!ret);
1092 
1093 	ret = pld_smmu_unmap(soc->osdev->dev,
1094 			     ipa_res->tx_comp_doorbell_paddr,
1095 			     sizeof(uint32_t));
1096 	qdf_assert_always(!ret);
1097 }
1098 
/* No-op: alternate TX buffer pool exists only with IPA_WDI3_TX_TWO_PIPES */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create,
							const char *func,
							uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}
1107 
/* No-op: the second TX pipe is not compiled in; nothing to add to @in */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}
1113 
1114 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
1115 			       qdf_ipa_wdi_conn_out_params_t *out)
1116 {
1117 	res->tx_comp_doorbell_paddr =
1118 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
1119 	res->rx_ready_doorbell_paddr =
1120 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
1121 }
1122 
1123 #ifdef IPA_WDS_EASYMESH_FEATURE
1124 /**
1125  * dp_ipa_setup_iface_session_id - Pass vdev id to IPA
1126  * @in: ipa in params
1127  * @session_id: vdev id
1128  *
1129  * Pass Vdev id to IPA, IPA metadata order is changed and vdev id
1130  * is stored at higher nibble so, no shift is required.
1131  *
1132  * Return: none
1133  */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	/* Easymesh meta data layout stores the vdev id at the higher nibble
	 * (per the header comment above), so no shift is applied here.
	 */
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id);
}
1139 #else
/**
 * dp_ipa_setup_iface_session_id() - Pass vdev id to IPA
 * @in: IPA WDI interface registration input parameters
 * @session_id: vdev id
 *
 * Vdev id is carried in the upper 16 bits of the IPA meta data word.
 *
 * Return: none
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
1145 #endif
1146 
1147 static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
1148 					       struct dp_ipa_resources *res)
1149 {
1150 	struct hal_srng *wbm_srng = (struct hal_srng *)
1151 		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1152 
1153 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
1154 			     res->tx_comp_doorbell_vaddr);
1155 }
1156 
1157 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
1158 					 struct dp_ipa_resources *ipa_res)
1159 {
1160 	struct hal_srng *wbm_srng = (struct hal_srng *)
1161 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1162 
1163 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
1164 					  ipa_res->tx_comp_doorbell_paddr);
1165 
1166 	dp_info("paddr %pK vaddr %pK",
1167 		(void *)ipa_res->tx_comp_doorbell_paddr,
1168 		(void *)ipa_res->tx_comp_doorbell_vaddr);
1169 }
1170 
1171 #ifdef IPA_SET_RESET_TX_DB_PA
1172 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
1173 					      struct dp_ipa_resources *ipa_res)
1174 {
1175 	hal_ring_handle_t wbm_srng =
1176 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1177 	qdf_dma_addr_t hp_addr;
1178 
1179 	if (!wbm_srng)
1180 		return QDF_STATUS_E_FAILURE;
1181 
1182 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
1183 
1184 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
1185 
1186 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
1187 
1188 	return QDF_STATUS_SUCCESS;
1189 }
1190 #endif /* IPA_SET_RESET_TX_DB_PA */
1191 
1192 #endif /* IPA_WDI3_TX_TWO_PIPES */
1193 
1194 /**
1195  * dp_tx_ipa_uc_detach - Free autonomy TX resources
1196  * @soc: data path instance
1197  * @pdev: core txrx pdev context
1198  *
1199  * Free allocated TX buffers with WBM SRNG
1200  *
1201  * Return: none
1202  */
1203 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1204 {
1205 	int idx;
1206 	qdf_nbuf_t nbuf;
1207 	struct dp_ipa_resources *ipa_res;
1208 
1209 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1210 		nbuf = (qdf_nbuf_t)
1211 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
1212 		if (!nbuf)
1213 			continue;
1214 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
1215 		qdf_mem_dp_tx_skb_cnt_dec();
1216 		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
1217 		qdf_nbuf_free(nbuf);
1218 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
1219 						(void *)NULL;
1220 	}
1221 
1222 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1223 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1224 
1225 	ipa_res = &pdev->ipa_resource;
1226 
1227 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
1228 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
1229 }
1230 
1231 /**
1232  * dp_rx_ipa_uc_detach - free autonomy RX resources
1233  * @soc: data path instance
1234  * @pdev: core txrx pdev context
1235  *
1236  * This function will detach DP RX into main device context
1237  * will free DP Rx resources.
1238  *
1239  * Return: none
1240  */
1241 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1242 {
1243 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1244 
1245 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
1246 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
1247 }
1248 
1249 /*
1250  * dp_rx_alt_ipa_uc_detach - free autonomy RX resources
1251  * @soc: data path instance
1252  * @pdev: core txrx pdev context
1253  *
1254  * This function will detach DP RX into main device context
1255  * will free DP Rx resources.
1256  *
1257  * Return: none
1258  */
1259 #ifdef IPA_WDI3_VLAN_SUPPORT
1260 static void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1261 {
1262 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1263 
1264 	if (!wlan_ipa_is_vlan_enabled())
1265 		return;
1266 
1267 	qdf_mem_free_sgtable(&ipa_res->rx_alt_rdy_ring.sgtable);
1268 	qdf_mem_free_sgtable(&ipa_res->rx_alt_refill_ring.sgtable);
1269 }
1270 #else
/* No-op: alternate RX pipe exists only with IPA_WDI3_VLAN_SUPPORT */
static inline
void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
1274 #endif
1275 
1276 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1277 {
1278 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1279 		return QDF_STATUS_SUCCESS;
1280 
1281 	/* TX resource detach */
1282 	dp_tx_ipa_uc_detach(soc, pdev);
1283 
1284 	/* Cleanup 2nd TX pipe resources */
1285 	dp_ipa_tx_alt_pool_detach(soc, pdev);
1286 
1287 	/* RX resource detach */
1288 	dp_rx_ipa_uc_detach(soc, pdev);
1289 
1290 	/* Cleanup 2nd RX pipe resources */
1291 	dp_rx_alt_ipa_uc_detach(soc, pdev);
1292 
1293 	return QDF_STATUS_SUCCESS;	/* success */
1294 }
1295 
1296 /**
1297  * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
1298  * @soc: data path instance
1299  * @pdev: Physical device handle
1300  *
1301  * Allocate TX buffer from non-cacheable memory
1302  * Attach allocated TX buffers with WBM SRNG
1303  *
1304  * Return: int
1305  */
1306 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1307 {
1308 	uint32_t tx_buffer_count;
1309 	uint32_t ring_base_align = 8;
1310 	qdf_dma_addr_t buffer_paddr;
1311 	struct hal_srng *wbm_srng = (struct hal_srng *)
1312 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1313 	struct hal_srng_params srng_params;
1314 	void *ring_entry;
1315 	int num_entries;
1316 	qdf_nbuf_t nbuf;
1317 	int retval = QDF_STATUS_SUCCESS;
1318 	int max_alloc_count = 0;
1319 	uint32_t wbm_bm_id;
1320 
1321 	/*
1322 	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
1323 	 * unsigned int uc_tx_buf_sz =
1324 	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
1325 	 */
1326 	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
1327 	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
1328 
1329 	wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx,
1330 						  IPA_TCL_DATA_RING_IDX);
1331 
1332 	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
1333 			    &srng_params);
1334 	num_entries = srng_params.num_entries;
1335 
1336 	max_alloc_count =
1337 		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
1338 	if (max_alloc_count <= 0) {
1339 		dp_err("incorrect value for buffer count %u", max_alloc_count);
1340 		return -EINVAL;
1341 	}
1342 
1343 	dp_info("requested %d buffers to be posted to wbm ring",
1344 		max_alloc_count);
1345 
1346 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
1347 		qdf_mem_malloc(num_entries *
1348 		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
1349 	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
1350 		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
1351 		return -ENOMEM;
1352 	}
1353 
1354 	hal_srng_access_start_unlocked(soc->hal_soc,
1355 				       hal_srng_to_hal_ring_handle(wbm_srng));
1356 
1357 	/*
1358 	 * Allocate Tx buffers as many as possible.
1359 	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
1360 	 * Populate Tx buffers into WBM2IPA ring
1361 	 * This initial buffer population will simulate H/W as source ring,
1362 	 * and update HP
1363 	 */
1364 	for (tx_buffer_count = 0;
1365 		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
1366 		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
1367 		if (!nbuf)
1368 			break;
1369 
1370 		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
1371 				hal_srng_to_hal_ring_handle(wbm_srng));
1372 		if (!ring_entry) {
1373 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1374 				  "%s: Failed to get WBM ring entry",
1375 				  __func__);
1376 			qdf_nbuf_free(nbuf);
1377 			break;
1378 		}
1379 
1380 		qdf_nbuf_map_single(soc->osdev, nbuf,
1381 				    QDF_DMA_BIDIRECTIONAL);
1382 		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1383 		qdf_mem_dp_tx_skb_cnt_inc();
1384 		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));
1385 
1386 		/*
1387 		 * TODO - KIWI code can directly call the be handler
1388 		 * instead of hal soc ops.
1389 		 */
1390 		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
1391 					     buffer_paddr, 0, wbm_bm_id);
1392 
1393 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
1394 			= (void *)nbuf;
1395 	}
1396 
1397 	hal_srng_access_end_unlocked(soc->hal_soc,
1398 				     hal_srng_to_hal_ring_handle(wbm_srng));
1399 
1400 	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
1401 
1402 	if (tx_buffer_count) {
1403 		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
1404 	} else {
1405 		dp_err("No IPA WDI TX buffer allocated!");
1406 		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1407 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1408 		retval = -ENOMEM;
1409 	}
1410 
1411 	return retval;
1412 }
1413 
1414 /**
1415  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
1416  * @soc: data path instance
1417  * @pdev: core txrx pdev context
1418  *
1419  * This function will attach a DP RX instance into the main
1420  * device (SOC) context.
1421  *
1422  * Return: QDF_STATUS_SUCCESS: success
1423  *         QDF_STATUS_E_RESOURCES: Error return
1424  */
1425 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1426 {
1427 	return QDF_STATUS_SUCCESS;
1428 }
1429 
1430 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1431 {
1432 	int error;
1433 
1434 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1435 		return QDF_STATUS_SUCCESS;
1436 
1437 	/* TX resource attach */
1438 	error = dp_tx_ipa_uc_attach(soc, pdev);
1439 	if (error) {
1440 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1441 			  "%s: DP IPA UC TX attach fail code %d",
1442 			  __func__, error);
1443 		return error;
1444 	}
1445 
1446 	/* Setup 2nd TX pipe */
1447 	error = dp_ipa_tx_alt_pool_attach(soc);
1448 	if (error) {
1449 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1450 			  "%s: DP IPA TX pool2 attach fail code %d",
1451 			  __func__, error);
1452 		dp_tx_ipa_uc_detach(soc, pdev);
1453 		return error;
1454 	}
1455 
1456 	/* RX resource attach */
1457 	error = dp_rx_ipa_uc_attach(soc, pdev);
1458 	if (error) {
1459 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1460 			  "%s: DP IPA UC RX attach fail code %d",
1461 			  __func__, error);
1462 		dp_ipa_tx_alt_pool_detach(soc, pdev);
1463 		dp_tx_ipa_uc_detach(soc, pdev);
1464 		return error;
1465 	}
1466 
1467 	return QDF_STATUS_SUCCESS;	/* success */
1468 }
1469 
1470 #ifdef IPA_WDI3_VLAN_SUPPORT
1471 /*
1472  * dp_ipa_rx_alt_ring_resource_setup() - setup IPA 2nd RX ring resources
1473  * @soc: data path SoC handle
1474  * @pdev: data path pdev handle
1475  *
1476  * Return: none
1477  */
static
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;
	qdf_dma_addr_t hp_addr;

	/* Alternate RX pipe is only used for VLAN traffic offload */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	/*
	 * Shadow register physical addresses below are derived by taking
	 * the register's offset from the device base vaddr and rebasing it
	 * onto the bus physical base (mem_pa).
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW3 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	/* Ring size in bytes: entry_size is in 4-byte units, hence << 2 */
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);

	/* Alternate RX refill buffer ring (ring3) */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring3.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);
}
1544 #else
/* No-op: alternate RX ring exists only with IPA_WDI3_VLAN_SUPPORT */
static inline
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
1548 #endif
/*
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: data path pdev handle
 *
 * Return: 0 on success (QDF_STATUS_SUCCESS when IPA is disabled)
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;
	uint8_t ix0_map[8];

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* Ring size in bytes: entry_size is in 4-byte units, hence << 2 */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* Saved so the HP write-back can be restored after IPA disconnect */
	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* No-op unless IPA_WDI3_TX_TWO_PIPES is compiled in */
	dp_ipa_tx_alt_ring_resource_setup(soc);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* RX refill buffer ring (ring2) */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	/* Program only IX0; IX1/IX2/IX3 are left untouched (NULL) */
	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	/* No-op unless IPA_WDI3_VLAN_SUPPORT is compiled in */
	dp_ipa_rx_alt_ring_resource_setup(soc, pdev);
	return 0;
}
1706 
1707 #ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_ipa_rx_alt_ring_get_resource() - get IPA 2nd RX ring resources
 * @pdev: data path pdev handle
 *
 * Return: QDF_STATUS_SUCCESS if the resources are found (or VLAN offload
 *         is disabled), QDF_STATUS_E_FAILURE otherwise
 */
1714 static QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
1715 {
1716 	struct dp_soc *soc = pdev->soc;
1717 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1718 
1719 	if (!wlan_ipa_is_vlan_enabled())
1720 		return QDF_STATUS_SUCCESS;
1721 
1722 	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_alt_rdy_ring,
1723 				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
1724 				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
1725 				   soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);
1726 
1727 	dp_ipa_get_shared_mem_info(
1728 			soc->osdev, &ipa_res->rx_alt_refill_ring,
1729 			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
1730 			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
1731 			soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);
1732 
1733 	if (!qdf_mem_get_dma_addr(soc->osdev,
1734 				  &ipa_res->rx_alt_rdy_ring.mem_info) ||
1735 	    !qdf_mem_get_dma_addr(soc->osdev,
1736 				  &ipa_res->rx_alt_refill_ring.mem_info))
1737 		return QDF_STATUS_E_FAILURE;
1738 
1739 	return QDF_STATUS_SUCCESS;
1740 }
1741 #else
/* No-op stub: no alternate RX ring without IPA_WDI3_VLAN_SUPPORT */
static inline QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1746 #endif
1747 
/**
 * dp_ipa_get_resource() - Fill pdev IPA resources from SoC ring state
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the pdev whose ipa_resource is filled
 *
 * Exports the TX/TX-comp and RX ready/refill ring memory descriptors (and
 * the allocated TX buffer count) into pdev->ipa_resource for the IPA
 * driver, then does the same for the optional alternate TX/RX pipes.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the pdev is
 *         invalid or any ring lacks a valid DMA address
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* Primary pipe ring descriptors */
	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/* Every ring must have a valid DMA address to be usable by IPA */
	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	/* Optional alternate pipes (no-ops when not compiled in) */
	if (dp_ipa_tx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	if (dp_ipa_rx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
1804 
#ifdef IPA_SET_RESET_TX_DB_PA
/*
 * TX doorbell PA is handled by the explicit set/reset path when
 * IPA_SET_RESET_TX_DB_PA is defined (see dp_ipa_reset_tx_doorbell_pa),
 * so nothing is programmed at set-doorbell time.
 */
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
/* Program the TX comp ring doorbell paddr once, at set-doorbell time */
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
		dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif
1811 
1812 #ifdef IPA_WDI3_VLAN_SUPPORT
1813 /*
1814  * dp_ipa_map_rx_alt_ring_doorbell_paddr() - Map 2nd rx ring doorbell paddr
1815  * @pdev: data path pdev handle
1816  *
1817  * Return: none
1818  */
1819 static void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
1820 {
1821 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1822 	uint32_t rx_ready_doorbell_dmaaddr;
1823 	struct dp_soc *soc = pdev->soc;
1824 	struct hal_srng *reo_srng = (struct hal_srng *)
1825 			soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
1826 	int ret = 0;
1827 
1828 	if (!wlan_ipa_is_vlan_enabled())
1829 		return;
1830 
1831 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
1832 		ret = pld_smmu_map(soc->osdev->dev,
1833 				   ipa_res->rx_alt_ready_doorbell_paddr,
1834 				   &rx_ready_doorbell_dmaaddr,
1835 				   sizeof(uint32_t));
1836 		ipa_res->rx_alt_ready_doorbell_paddr =
1837 					rx_ready_doorbell_dmaaddr;
1838 		qdf_assert_always(!ret);
1839 	}
1840 
1841 	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
1842 					  ipa_res->rx_alt_ready_doorbell_paddr);
1843 }
1844 
1845 /*
1846  * dp_ipa_unmap_rx_alt_ring_doorbell_paddr() - Unmap 2nd rx ring doorbell paddr
1847  * @pdev: data path pdev handle
1848  *
1849  * Return: none
1850  */
1851 static void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
1852 {
1853 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1854 	struct dp_soc *soc = pdev->soc;
1855 	int ret = 0;
1856 
1857 	if (!wlan_ipa_is_vlan_enabled())
1858 		return;
1859 
1860 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
1861 		return;
1862 
1863 	ret = pld_smmu_unmap(soc->osdev->dev,
1864 			     ipa_res->rx_alt_ready_doorbell_paddr,
1865 			     sizeof(uint32_t));
1866 	qdf_assert_always(!ret);
1867 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: 2nd rx ring doorbell map/unmap are no-ops */
static inline void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }

static inline void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }
#endif
1875 
1876 QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1877 {
1878 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1879 	struct dp_pdev *pdev =
1880 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1881 	struct dp_ipa_resources *ipa_res;
1882 	struct hal_srng *reo_srng = (struct hal_srng *)
1883 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
1884 
1885 	if (!pdev) {
1886 		dp_err("Invalid instance");
1887 		return QDF_STATUS_E_FAILURE;
1888 	}
1889 
1890 	ipa_res = &pdev->ipa_resource;
1891 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1892 		return QDF_STATUS_SUCCESS;
1893 
1894 	dp_ipa_map_ring_doorbell_paddr(pdev);
1895 	dp_ipa_map_rx_alt_ring_doorbell_paddr(pdev);
1896 
1897 	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);
1898 
1899 	/*
1900 	 * For RX, REO module on Napier/Hastings does reordering on incoming
1901 	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
1902 	 * ring.It then updates the ring’s Write/Head ptr and rings a doorbell
1903 	 * to IPA.
1904 	 * Set the doorbell addr for the REO ring.
1905 	 */
1906 	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
1907 					  ipa_res->rx_ready_doorbell_paddr);
1908 	return QDF_STATUS_SUCCESS;
1909 }
1910 
1911 QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
1912 					 uint8_t pdev_id)
1913 {
1914 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1915 	struct dp_pdev *pdev =
1916 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1917 	struct dp_ipa_resources *ipa_res;
1918 
1919 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1920 		return QDF_STATUS_SUCCESS;
1921 
1922 	if (!pdev) {
1923 		dp_err("Invalid instance");
1924 		return QDF_STATUS_E_FAILURE;
1925 	}
1926 
1927 	ipa_res = &pdev->ipa_resource;
1928 	if (!ipa_res->is_db_ddr_mapped)
1929 		iounmap(ipa_res->tx_comp_doorbell_vaddr);
1930 
1931 	return QDF_STATUS_SUCCESS;
1932 }
1933 
1934 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1935 			      uint8_t *op_msg)
1936 {
1937 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1938 	struct dp_pdev *pdev =
1939 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1940 
1941 	if (!pdev) {
1942 		dp_err("Invalid instance");
1943 		return QDF_STATUS_E_FAILURE;
1944 	}
1945 
1946 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
1947 		return QDF_STATUS_SUCCESS;
1948 
1949 	if (pdev->ipa_uc_op_cb) {
1950 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
1951 	} else {
1952 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1953 		    "%s: IPA callback function is not registered", __func__);
1954 		qdf_mem_free(op_msg);
1955 		return QDF_STATUS_E_FAILURE;
1956 	}
1957 
1958 	return QDF_STATUS_SUCCESS;
1959 }
1960 
1961 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1962 				 ipa_uc_op_cb_type op_cb,
1963 				 void *usr_ctxt)
1964 {
1965 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1966 	struct dp_pdev *pdev =
1967 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1968 
1969 	if (!pdev) {
1970 		dp_err("Invalid instance");
1971 		return QDF_STATUS_E_FAILURE;
1972 	}
1973 
1974 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
1975 		return QDF_STATUS_SUCCESS;
1976 
1977 	pdev->ipa_uc_op_cb = op_cb;
1978 	pdev->usr_ctxt = usr_ctxt;
1979 
1980 	return QDF_STATUS_SUCCESS;
1981 }
1982 
1983 void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1984 {
1985 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1986 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1987 
1988 	if (!pdev) {
1989 		dp_err("Invalid instance");
1990 		return;
1991 	}
1992 
1993 	dp_debug("Deregister OP handler callback");
1994 	pdev->ipa_uc_op_cb = NULL;
1995 	pdev->usr_ctxt = NULL;
1996 }
1997 
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD: stats retrieval not implemented; unconditionally succeeds */
	return QDF_STATUS_SUCCESS;
}
2003 
2004 /**
2005  * dp_tx_send_ipa_data_frame() - send IPA data frame
2006  * @soc_hdl: datapath soc handle
2007  * @vdev_id: id of the virtual device
2008  * @skb: skb to transmit
2009  *
2010  * Return: skb/ NULL is for success
2011  */
2012 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2013 				     qdf_nbuf_t skb)
2014 {
2015 	qdf_nbuf_t ret;
2016 
2017 	/* Terminate the (single-element) list of tx frames */
2018 	qdf_nbuf_set_next(skb, NULL);
2019 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
2020 	if (ret) {
2021 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2022 			  "%s: Failed to tx", __func__);
2023 		return ret;
2024 	}
2025 
2026 	return NULL;
2027 }
2028 
2029 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
2030 /**
2031  * dp_ipa_is_target_ready() - check if target is ready or not
2032  * @soc: datapath soc handle
2033  *
2034  * Return: true if target is ready
2035  */
2036 static inline
2037 bool dp_ipa_is_target_ready(struct dp_soc *soc)
2038 {
2039 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
2040 		return false;
2041 	else
2042 		return true;
2043 }
2044 #else
2045 static inline
2046 bool dp_ipa_is_target_ready(struct dp_soc *soc)
2047 {
2048 	return true;
2049 }
2050 #endif
2051 
/**
 * dp_ipa_enable_autonomy() - Remap REO destination rings to the REO2IPA ring
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Rewrites REO remap register IX0 (and IX2/IX3 when rx hash is enabled)
 * so incoming traffic is steered to the ring consumed by IPA.
 *
 * Return: QDF_STATUS_SUCCESS on success or when IPA is disabled,
 *	   QDF_STATUS_E_AGAIN when the target is not ready,
 *	   QDF_STATUS_E_FAILURE on invalid pdev
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint8_t ix_map[8];

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Defer the register writes until the target is up; caller retries */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	if (!dp_ipa_is_target_ready(soc))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix_map[0] = REO_REMAP_SW1;
	ix_map[1] = REO_REMAP_SW4;
	ix_map[2] = REO_REMAP_SW1;
	/* NOTE(review): with VLAN offload, slot 3 stays on SW3 — presumably
	 * feeding the 2nd (VLAN) rx ring; confirm against ring assignment.
	 */
	if (wlan_ipa_is_vlan_enabled())
		ix_map[3] = REO_REMAP_SW3;
	else
		ix_map[3] = REO_REMAP_SW4;
	ix_map[4] = REO_REMAP_SW4;
	ix_map[5] = REO_REMAP_RELEASE;
	ix_map[6] = REO_REMAP_FW;
	ix_map[7] = REO_REMAP_FW;

	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* With rx hash enabled, point every hash bucket at SW4 */
		ix_map[0] = REO_REMAP_SW4;
		ix_map[1] = REO_REMAP_SW4;
		ix_map[2] = REO_REMAP_SW4;
		ix_map[3] = REO_REMAP_SW4;
		ix_map[4] = REO_REMAP_SW4;
		ix_map[5] = REO_REMAP_SW4;
		ix_map[6] = REO_REMAP_SW4;
		ix_map[7] = REO_REMAP_SW4;

		ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2,
					    ix_map);

		/* Same value is written to both IX2 and IX3 */
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
2115 
/**
 * dp_ipa_disable_autonomy() - Restore default REO destination remap config
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Undoes dp_ipa_enable_autonomy(): programs IX0 back to the SW1..SW3 host
 * ring distribution and, when rx hash is enabled, restores IX2/IX3 from
 * dp_reo_remap_config().
 *
 * Return: QDF_STATUS_SUCCESS on success or when IPA is disabled,
 *	   QDF_STATUS_E_AGAIN when the target is not ready,
 *	   QDF_STATUS_E_FAILURE on invalid pdev
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t ix0_map[8];
	uint32_t ix0;
	uint32_t ix1;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	if (!dp_ipa_is_target_ready(soc))
		return QDF_STATUS_E_AGAIN;

	/* Default host distribution across SW1..SW3 */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_reo_remap_config(soc, &ix1, &ix2, &ix3);

		/* ix1 is filled by dp_reo_remap_config() but IX1 is left
		 * untouched here (NULL passed for the IX1 slot).
		 */
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
2168 
2169 /* This should be configurable per H/W configuration enable status */
2170 #define L3_HEADER_PADDING	2
2171 
2172 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
2173 	defined(CONFIG_IPA_WDI_UNIFIED_API)
2174 
2175 #if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL)
2176 static inline void dp_setup_mcc_sys_pipes(
2177 		qdf_ipa_sys_connect_params_t *sys_in,
2178 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
2179 {
2180 	int i = 0;
2181 	/* Setup MCC sys pipe */
2182 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
2183 			DP_IPA_MAX_IFACE;
2184 	for (i = 0; i < DP_IPA_MAX_IFACE; i++)
2185 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
2186 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
2187 }
2188 #else
2189 static inline void dp_setup_mcc_sys_pipes(
2190 		qdf_ipa_sys_connect_params_t *sys_in,
2191 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
2192 {
2193 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
2194 }
2195 #endif
2196 
/**
 * dp_ipa_wdi_tx_params() - Fill WDI tx pipe setup info (non-SMMU case)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @tx: tx pipe setup info to populate
 * @over_gsi: flag for IPA offload over gsi
 *
 * Transfer ring is the WBM tx completion ring; event ring is the TCL ring.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	/* No per-packet offset on tx */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	dp_ipa_setup_tx_params_bank_id(soc, tx);
}
2238 
/**
 * dp_ipa_wdi_rx_params() - Fill WDI rx pipe setup info (non-SMMU case)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx: rx pipe setup info to populate
 * @over_gsi: flag for IPA offload over gsi
 *
 * Transfer ring is the REO2IPA rx ready ring; event ring is the FW rx
 * refill ring.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Skip the rx TLV header plus 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2278 
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill WDI tx pipe setup info (SMMU case)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @tx_smmu: smmu tx pipe setup info to populate
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle (selects the WLAN consumer client)
 *
 * Same layout as dp_ipa_wdi_tx_params() but ring bases are passed as
 * scatter-gather tables for SMMU mapping.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN2_CONS;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN4_CONS;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;
	}

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu);
}
2326 
/**
 * dp_ipa_wdi_rx_smmu_params() - Fill WDI rx pipe setup info (SMMU case)
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx_smmu: smmu rx pipe setup info to populate
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle (selects the WLAN producer client)
 *
 * Same layout as dp_ipa_wdi_rx_params() but ring bases are passed as
 * scatter-gather tables for SMMU mapping.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;
	}

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip the rx TLV header plus 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2372 
#ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe smmu params
 * @soc: data path soc handle
 * @ipa_res: ipa resource pointer
 * @rx_smmu: smmu pipe info handle
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle
 *
 * Return: none
 */
static void
dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
				   bool over_gsi,
				   qdf_ipa_wdi_hdl_t hdl)
{
	/* The 2nd rx pipe exists only when VLAN offload is enabled */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD1;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD1;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;
	}

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_alt_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_alt_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip the rx TLV header plus 2-byte L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2432 
2433 /*
2434  * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe params
2435  * @soc: data path soc handle
2436  * @ipa_res: ipa resource pointer
2437  * @rx: pipe info handle
2438  * @over_gsi: flag for IPA offload over gsi
2439  * @hdl: ipa registered handle
2440  *
2441  * Return: none
2442  */
2443 static void dp_ipa_wdi_rx_alt_pipe_params(struct dp_soc *soc,
2444 					  struct dp_ipa_resources *ipa_res,
2445 					  qdf_ipa_wdi_pipe_setup_info_t *rx,
2446 					  bool over_gsi,
2447 					  qdf_ipa_wdi_hdl_t hdl)
2448 {
2449 	if (!wlan_ipa_is_vlan_enabled())
2450 		return;
2451 
2452 	if (over_gsi) {
2453 		if (hdl == DP_IPA_HDL_FIRST)
2454 			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
2455 				IPA_CLIENT_WLAN2_PROD1;
2456 		else if (hdl == DP_IPA_HDL_SECOND)
2457 			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
2458 				IPA_CLIENT_WLAN3_PROD1;
2459 	} else {
2460 		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
2461 					IPA_CLIENT_WLAN1_PROD;
2462 	}
2463 
2464 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
2465 		qdf_mem_get_dma_addr(soc->osdev,
2466 				     &ipa_res->rx_alt_rdy_ring.mem_info);
2467 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
2468 		qdf_mem_get_dma_size(soc->osdev,
2469 				     &ipa_res->rx_alt_rdy_ring.mem_info);
2470 
2471 	/* REO Tail Pointer Address */
2472 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
2473 		soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr;
2474 	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;
2475 
2476 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
2477 		qdf_mem_get_dma_addr(soc->osdev,
2478 				     &ipa_res->rx_alt_refill_ring.mem_info);
2479 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
2480 		qdf_mem_get_dma_size(soc->osdev,
2481 				     &ipa_res->rx_alt_refill_ring.mem_info);
2482 
2483 	/* FW Head Pointer Address */
2484 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
2485 		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr;
2486 	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;
2487 
2488 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
2489 		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
2490 }
2491 
2492 /*
2493  * dp_ipa_setup_rx_alt_pipe() - Setup 2nd rx pipe for IPA offload
2494  * @soc: data path soc handle
2495  * @res: ipa resource pointer
2496  * @in: pipe in handle
2497  * @over_gsi: flag for IPA offload over gsi
2498  * @hdl: ipa registered handle
2499  *
2500  * Return: none
2501  */
2502 static void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc,
2503 				     struct dp_ipa_resources *res,
2504 				     qdf_ipa_wdi_conn_in_params_t *in,
2505 				     bool over_gsi,
2506 				     qdf_ipa_wdi_hdl_t hdl)
2507 {
2508 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
2509 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
2510 	qdf_ipa_ep_cfg_t *rx_cfg;
2511 
2512 	if (!wlan_ipa_is_vlan_enabled())
2513 		return;
2514 
2515 	QDF_IPA_WDI_CONN_IN_PARAMS_IS_RX1_USED(in) = true;
2516 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
2517 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT_SMMU(in);
2518 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
2519 		dp_ipa_wdi_rx_alt_pipe_smmu_params(soc, res, rx_smmu,
2520 						   over_gsi, hdl);
2521 	} else {
2522 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT(in);
2523 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx);
2524 		dp_ipa_wdi_rx_alt_pipe_params(soc, res, rx, over_gsi, hdl);
2525 	}
2526 
2527 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
2528 	/* Update with wds len(96) + 4 if wds support is enabled */
2529 	if (ucfg_ipa_is_wds_enabled())
2530 		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN;
2531 	else
2532 		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN;
2533 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
2534 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
2535 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
2536 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
2537 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
2538 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
2539 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
2540 }
2541 
2542 /*
2543  * dp_ipa_set_rx_alt_pipe_db() - Setup 2nd rx pipe doorbell
2544  * @res: ipa resource pointer
2545  * @out: pipe out handle
2546  *
2547  * Return: none
2548  */
2549 static void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res,
2550 				      qdf_ipa_wdi_conn_out_params_t *out)
2551 {
2552 	if (!wlan_ipa_is_vlan_enabled())
2553 		return;
2554 
2555 	res->rx_alt_ready_doorbell_paddr =
2556 			QDF_IPA_WDI_CONN_OUT_PARAMS_RX_ALT_UC_DB_PA(out);
2557 	dp_debug("Setting DB 0x%x for RX alt pipe",
2558 		 res->rx_alt_ready_doorbell_paddr);
2559 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: 2nd rx pipe setup/doorbell are no-ops */
static inline
void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc,
			      struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in,
			      bool over_gsi,
			      qdf_ipa_wdi_hdl_t hdl)
{ }

static inline
void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{ }
#endif
2574 
/**
 * dp_ipa_setup() - Connect the WDI TX/RX pipes between WLAN DP and IPA
 * @soc_hdl: data path soc handle
 * @pdev_id: physical device instance id
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: private context passed back in @ipa_w2i_cb
 * @is_rm_enabled: is IPA RM enabled or not
 * @tx_pipe_handle: pointer to tx pipe handle
 * @rx_pipe_handle: pointer to rx pipe handle
 * @is_smmu_enabled: is SMMU enabled or not
 * @sys_in: parameters to setup sys pipes in mcc mode
 * @over_gsi: flag for IPA offload over gsi
 * @hdl: ipa registered handle
 * @id: ipa instance id used to select the WLAN client
 * @ipa_ast_notify_cb: IPA to WLAN callback for ast create/update
 *
 * Fills the TX and RX (and optional alt) pipe setup info, connects the
 * pipes via qdf_ipa_wdi_conn_pipes() and records the doorbell addresses
 * IPA returns.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
			qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
			void *ipa_ast_notify_cb)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	qdf_ipa_ep_cfg_t *tx_cfg;
	qdf_ipa_ep_cfg_t *rx_cfg;
	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
	qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
	qdf_ipa_wdi_conn_out_params_t pipe_out;
	int ret;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
	if (!pipe_in)
		return QDF_STATUS_E_NOMEM;

	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	if (is_smmu_enabled)
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
	else
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;

	dp_setup_mcc_sys_pipes(sys_in, pipe_in);

	/* TX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
	} else {
		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
	}

	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;

	/**
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, id);
	else
		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);

	dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);

	/* RX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
	} else {
		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
	}

	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
	/* AST variant of the rx header is used when WDS is enabled */
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST;
	else
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;

	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;

	/**
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, id);
	else
		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);

	/* setup 2nd rx pipe */
	dp_ipa_setup_rx_alt_pipe(soc, ipa_res, pipe_in, over_gsi, id);

	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
	QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl;
	dp_ipa_ast_notify_cb(pipe_in, ipa_ast_notify_cb);

	/* Connect WDI IPA PIPEs */
	ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		qdf_mem_free(pipe_in);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	/* Record the doorbell PAs that IPA returned for later programming */
	dp_ipa_set_pipe_db(ipa_res, &pipe_out);
	dp_ipa_set_rx_alt_pipe_db(ipa_res, &pipe_out);

	ipa_res->is_db_ddr_mapped =
		QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);

	soc->ipa_first_tx_db_access = true;
	qdf_mem_free(pipe_in);

	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
	soc->ipa_rx_buf_map_lock_initialized = true;

	return QDF_STATUS_SUCCESS;
}
2723 
#ifdef IPA_WDI3_VLAN_SUPPORT
/*
 * dp_ipa_set_rx1_used() - Set rx1 used flag for 2nd rx offload ring
 * @in: pipe in handle
 *
 * Return: none
 */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_RX1_USED(in) = true;
}
2736 
2737 /*
2738  * dp_ipa_set_v4_vlan_hdr() - Set v4 vlan hdr
2739  * @in: pipe in handle
2740  * hdr: pointer to hdr
2741  *
2742  * Return: none
2743  */
2744 static inline
2745 void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
2746 			    qdf_ipa_wdi_hdr_info_t *hdr)
2747 {
2748 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v4_VLAN]),
2749 		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
2750 }
2751 
2752 /*
2753  * dp_ipa_set_v6_vlan_hdr() - Set v6 vlan hdr
2754  * @in: pipe in handle
2755  * hdr: pointer to hdr
2756  *
2757  * Return: none
2758  */
2759 static inline
2760 void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
2761 			    qdf_ipa_wdi_hdr_info_t *hdr)
2762 {
2763 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v6_VLAN]),
2764 		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
2765 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: VLAN header registration is a no-op */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{ }

static inline
void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }

static inline
void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }
#endif
2781 
#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_set_wdi_hdr_type() - Set wdi hdr type for IPA
 * @hdr_info: Header info
 *
 * Selects the AST variant of the Ethernet II header type when WDS is
 * enabled at runtime, plain Ethernet II otherwise.
 *
 * Return: None
 */
static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_ETHERNET_II_AST;
	else
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_ETHERNET_II;
}
#else
/* Without WDS easymesh support, always use plain Ethernet II */
static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = IPA_HDR_L2_ETHERNET_II;
}
#endif
2806 
#ifdef IPA_WDI3_VLAN_SUPPORT
/**
 * dp_ipa_set_wdi_vlan_hdr_type() - Set wdi vlan hdr type for IPA
 * @hdr_info: Header info
 *
 * Selects the AST variant of the 802.1Q header type when WDS is enabled
 * at runtime, plain 802.1Q otherwise.
 *
 * Return: None
 */
static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_802_1Q_AST;
	else
		QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
			IPA_HDR_L2_802_1Q;
}
#else
/* No-op when WDI3 VLAN support is not compiled in */
static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{ }
#endif
2829 
2830 /**
2831  * dp_ipa_setup_iface() - Setup IPA header and register interface
2832  * @ifname: Interface name
2833  * @mac_addr: Interface MAC address
2834  * @prod_client: IPA prod client type
2835  * @cons_client: IPA cons client type
2836  * @session_id: Session ID
2837  * @is_ipv6_enabled: Is IPV6 enabled or not
2838  * @hdl: IPA handle
2839  *
2840  * Return: QDF_STATUS
2841  */
2842 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
2843 			      qdf_ipa_client_type_t prod_client,
2844 			      qdf_ipa_client_type_t cons_client,
2845 			      uint8_t session_id, bool is_ipv6_enabled,
2846 			      qdf_ipa_wdi_hdl_t hdl)
2847 {
2848 	qdf_ipa_wdi_reg_intf_in_params_t in;
2849 	qdf_ipa_wdi_hdr_info_t hdr_info;
2850 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
2851 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
2852 	struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr;
2853 	struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr_v6;
2854 	int ret = -EINVAL;
2855 
2856 	qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
2857 
2858 	/* Need to reset the values to 0 as all the fields are not
2859 	 * updated in the Header, Unused fields will be set to 0.
2860 	 */
2861 	qdf_mem_zero(&uc_tx_vlan_hdr, sizeof(struct dp_ipa_uc_tx_vlan_hdr));
2862 	qdf_mem_zero(&uc_tx_vlan_hdr_v6, sizeof(struct dp_ipa_uc_tx_vlan_hdr));
2863 
2864 	dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
2865 		 QDF_MAC_ADDR_REF(mac_addr));
2866 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2867 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
2868 
2869 	/* IPV4 header */
2870 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
2871 
2872 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
2873 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2874 	dp_ipa_set_wdi_hdr_type(&hdr_info);
2875 
2876 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2877 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2878 
2879 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
2880 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
2881 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2882 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
2883 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
2884 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = WLAN_IPA_META_DATA_MASK;
2885 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl;
2886 	dp_ipa_setup_iface_session_id(&in, session_id);
2887 	dp_debug("registering for session_id: %u", session_id);
2888 
2889 	/* IPV6 header */
2890 	if (is_ipv6_enabled) {
2891 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
2892 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
2893 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
2894 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
2895 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
2896 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2897 	}
2898 
2899 	if (wlan_ipa_is_vlan_enabled()) {
2900 		/* Add vlan specific headers if vlan supporti is enabled */
2901 		qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2902 		dp_ipa_set_rx1_used(&in);
2903 		qdf_ether_addr_copy(uc_tx_vlan_hdr.eth.h_source, mac_addr);
2904 		/* IPV4 Vlan header */
2905 		uc_tx_vlan_hdr.eth.h_vlan_proto = qdf_htons(ETH_P_8021Q);
2906 		uc_tx_vlan_hdr.eth.h_vlan_encapsulated_proto = qdf_htons(ETH_P_IP);
2907 
2908 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) =
2909 				(uint8_t *)&uc_tx_vlan_hdr;
2910 		QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) =
2911 				DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN;
2912 		dp_ipa_set_wdi_vlan_hdr_type(&hdr_info);
2913 
2914 		QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2915 			DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2916 
2917 		dp_ipa_set_v4_vlan_hdr(&in, &hdr_info);
2918 
2919 		/* IPV6 Vlan header */
2920 		if (is_ipv6_enabled) {
2921 			qdf_mem_copy(&uc_tx_vlan_hdr_v6, &uc_tx_vlan_hdr,
2922 				     DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN);
2923 			uc_tx_vlan_hdr_v6.eth.h_vlan_proto =
2924 					qdf_htons(ETH_P_8021Q);
2925 			uc_tx_vlan_hdr_v6.eth.h_vlan_encapsulated_proto =
2926 					qdf_htons(ETH_P_IPV6);
2927 			QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) =
2928 					(uint8_t *)&uc_tx_vlan_hdr_v6;
2929 			dp_ipa_set_v6_vlan_hdr(&in, &hdr_info);
2930 		}
2931 	}
2932 
2933 	ret = qdf_ipa_wdi_reg_intf(&in);
2934 	if (ret) {
2935 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2936 			  "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
2937 			  __func__, ret);
2938 		return QDF_STATUS_E_FAILURE;
2939 	}
2940 
2941 	return QDF_STATUS_SUCCESS;
2942 }
2943 
2944 #else /* !CONFIG_IPA_WDI_UNIFIED_API */
2945 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2946 			void *ipa_i2w_cb, void *ipa_w2i_cb,
2947 			void *ipa_wdi_meter_notifier_cb,
2948 			uint32_t ipa_desc_size, void *ipa_priv,
2949 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
2950 			uint32_t *rx_pipe_handle)
2951 {
2952 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2953 	struct dp_pdev *pdev =
2954 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2955 	struct dp_ipa_resources *ipa_res;
2956 	qdf_ipa_wdi_pipe_setup_info_t *tx;
2957 	qdf_ipa_wdi_pipe_setup_info_t *rx;
2958 	qdf_ipa_wdi_conn_in_params_t pipe_in;
2959 	qdf_ipa_wdi_conn_out_params_t pipe_out;
2960 	struct tcl_data_cmd *tcl_desc_ptr;
2961 	uint8_t *desc_addr;
2962 	uint32_t desc_size;
2963 	int ret;
2964 
2965 	if (!pdev) {
2966 		dp_err("Invalid instance");
2967 		return QDF_STATUS_E_FAILURE;
2968 	}
2969 
2970 	ipa_res = &pdev->ipa_resource;
2971 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2972 		return QDF_STATUS_SUCCESS;
2973 
2974 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2975 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2976 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
2977 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
2978 
2979 	/* TX PIPE */
2980 	/**
2981 	 * Transfer Ring: WBM Ring
2982 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
2983 	 * Event Ring: TCL ring
2984 	 * Event Ring Doorbell PA: TCL Head Pointer Address
2985 	 */
2986 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
2987 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
2988 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2989 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
2990 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
2991 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
2992 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
2993 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
2994 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
2995 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
2996 		ipa_res->tx_comp_ring_base_paddr;
2997 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
2998 		ipa_res->tx_comp_ring_size;
2999 	/* WBM Tail Pointer Address */
3000 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
3001 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
3002 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
3003 		ipa_res->tx_ring_base_paddr;
3004 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
3005 	/* TCL Head Pointer Address */
3006 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
3007 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
3008 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
3009 		ipa_res->tx_num_alloc_buffer;
3010 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
3011 
3012 	/* Preprogram TCL descriptor */
3013 	desc_addr =
3014 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
3015 	desc_size = sizeof(struct tcl_data_cmd);
3016 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
3017 	tcl_desc_ptr = (struct tcl_data_cmd *)
3018 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
3019 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
3020 						HAL_RX_BUF_RBM_SW2_BM;
3021 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
3022 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
3023 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
3024 
3025 	/* RX PIPE */
3026 	/**
3027 	 * Transfer Ring: REO Ring
3028 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
3029 	 * Event Ring: FW ring
3030 	 * Event Ring Doorbell PA: FW Head Pointer Address
3031 	 */
3032 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
3033 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
3034 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
3035 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
3036 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
3037 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
3038 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
3039 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
3040 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
3041 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
3042 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
3043 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
3044 						ipa_res->rx_rdy_ring_base_paddr;
3045 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
3046 						ipa_res->rx_rdy_ring_size;
3047 	/* REO Tail Pointer Address */
3048 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
3049 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
3050 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
3051 					ipa_res->rx_refill_ring_base_paddr;
3052 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
3053 						ipa_res->rx_refill_ring_size;
3054 	/* FW Head Pointer Address */
3055 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
3056 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
3057 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size +
3058 						L3_HEADER_PADDING;
3059 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
3060 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
3061 
3062 	/* Connect WDI IPA PIPE */
3063 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
3064 	if (ret) {
3065 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3066 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
3067 			  __func__, ret);
3068 		return QDF_STATUS_E_FAILURE;
3069 	}
3070 
3071 	/* IPA uC Doorbell registers */
3072 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3073 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
3074 		  __func__,
3075 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
3076 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
3077 
3078 	ipa_res->tx_comp_doorbell_paddr =
3079 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
3080 	ipa_res->tx_comp_doorbell_vaddr =
3081 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
3082 	ipa_res->rx_ready_doorbell_paddr =
3083 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
3084 
3085 	soc->ipa_first_tx_db_access = true;
3086 
3087 	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
3088 	soc->ipa_rx_buf_map_lock_initialized = true;
3089 
3090 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3091 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
3092 		  __func__,
3093 		  "transfer_ring_base_pa",
3094 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
3095 		  "transfer_ring_size",
3096 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
3097 		  "transfer_ring_doorbell_pa",
3098 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
3099 		  "event_ring_base_pa",
3100 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
3101 		  "event_ring_size",
3102 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
3103 		  "event_ring_doorbell_pa",
3104 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
3105 		  "num_pkt_buffers",
3106 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
3107 		  "tx_comp_doorbell_paddr",
3108 		  (void *)ipa_res->tx_comp_doorbell_paddr);
3109 
3110 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3111 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
3112 		  __func__,
3113 		  "transfer_ring_base_pa",
3114 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
3115 		  "transfer_ring_size",
3116 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
3117 		  "transfer_ring_doorbell_pa",
3118 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
3119 		  "event_ring_base_pa",
3120 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
3121 		  "event_ring_size",
3122 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
3123 		  "event_ring_doorbell_pa",
3124 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
3125 		  "num_pkt_buffers",
3126 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
3127 		  "tx_comp_doorbell_paddr",
3128 		  (void *)ipa_res->rx_ready_doorbell_paddr);
3129 
3130 	return QDF_STATUS_SUCCESS;
3131 }
3132 
3133 /**
3134  * dp_ipa_setup_iface() - Setup IPA header and register interface
3135  * @ifname: Interface name
3136  * @mac_addr: Interface MAC address
3137  * @prod_client: IPA prod client type
3138  * @cons_client: IPA cons client type
3139  * @session_id: Session ID
3140  * @is_ipv6_enabled: Is IPV6 enabled or not
3141  * @hdl: IPA handle
3142  *
3143  * Return: QDF_STATUS
3144  */
3145 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
3146 			      qdf_ipa_client_type_t prod_client,
3147 			      qdf_ipa_client_type_t cons_client,
3148 			      uint8_t session_id, bool is_ipv6_enabled,
3149 			      qdf_ipa_wdi_hdl_t hdl)
3150 {
3151 	qdf_ipa_wdi_reg_intf_in_params_t in;
3152 	qdf_ipa_wdi_hdr_info_t hdr_info;
3153 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
3154 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
3155 	int ret = -EINVAL;
3156 
3157 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3158 		  "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
3159 		  __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
3160 
3161 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3162 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
3163 
3164 	/* IPV4 header */
3165 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
3166 
3167 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
3168 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
3169 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
3170 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
3171 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3172 
3173 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
3174 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
3175 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3176 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
3177 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
3178 		htonl(session_id << 16);
3179 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
3180 
3181 	/* IPV6 header */
3182 	if (is_ipv6_enabled) {
3183 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
3184 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
3185 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
3186 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
3187 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
3188 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3189 	}
3190 
3191 	ret = qdf_ipa_wdi_reg_intf(&in);
3192 	if (ret) {
3193 		dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
3194 		       ret);
3195 		return QDF_STATUS_E_FAILURE;
3196 	}
3197 
3198 	return QDF_STATUS_SUCCESS;
3199 }
3200 
3201 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
3202 
3203 /**
3204  * dp_ipa_cleanup() - Disconnect IPA pipes
3205  * @soc_hdl: dp soc handle
3206  * @pdev_id: dp pdev id
3207  * @tx_pipe_handle: Tx pipe handle
3208  * @rx_pipe_handle: Rx pipe handle
3209  * @hdl: IPA handle
3210  *
3211  * Return: QDF_STATUS
3212  */
3213 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3214 			  uint32_t tx_pipe_handle, uint32_t rx_pipe_handle,
3215 			  qdf_ipa_wdi_hdl_t hdl)
3216 {
3217 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3218 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3219 	struct dp_pdev *pdev;
3220 	int ret;
3221 
3222 	ret = qdf_ipa_wdi_disconn_pipes(hdl);
3223 	if (ret) {
3224 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
3225 		       ret);
3226 		status = QDF_STATUS_E_FAILURE;
3227 	}
3228 
3229 	if (soc->ipa_rx_buf_map_lock_initialized) {
3230 		qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
3231 		soc->ipa_rx_buf_map_lock_initialized = false;
3232 	}
3233 
3234 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3235 	if (qdf_unlikely(!pdev)) {
3236 		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
3237 		status = QDF_STATUS_E_FAILURE;
3238 		goto exit;
3239 	}
3240 
3241 	dp_ipa_unmap_ring_doorbell_paddr(pdev);
3242 	dp_ipa_unmap_rx_alt_ring_doorbell_paddr(pdev);
3243 exit:
3244 	return status;
3245 }
3246 
3247 /**
3248  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
3249  * @ifname: Interface name
3250  * @is_ipv6_enabled: Is IPV6 enabled or not
3251  * @hdl: IPA handle
3252  *
3253  * Return: QDF_STATUS
3254  */
3255 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
3256 				qdf_ipa_wdi_hdl_t hdl)
3257 {
3258 	int ret;
3259 
3260 	ret = qdf_ipa_wdi_dereg_intf(ifname, hdl);
3261 	if (ret) {
3262 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3263 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
3264 			  __func__, ret);
3265 		return QDF_STATUS_E_FAILURE;
3266 	}
3267 
3268 	return QDF_STATUS_SUCCESS;
3269 }
3270 
#ifdef IPA_SET_RESET_TX_DB_PA
/* Helpers to program/reset the IPA tx-comp doorbell physical address
 * around pipe enable/disable; compile away to no-ops when the feature
 * is not enabled.
 */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
				dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
				dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
#else
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
#endif
3280 
/**
 * dp_ipa_enable_pipes() - Enable the connected IPA WDI pipes
 * @soc_hdl: dp soc handle
 * @pdev_id: dp pdev id
 * @hdl: IPA handle
 *
 * Marks the pipes enabled, programs the tx doorbell address and maps the
 * rx buffer pool for SMMU before asking IPA to enable the pipes; each of
 * those steps is rolled back if the enable call fails.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       qdf_ipa_wdi_hdl_t hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	QDF_STATUS result;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;

	/* Flag pipes as enabled before the actual enable call */
	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
	DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true,
					       __func__, __LINE__);

	result = qdf_ipa_wdi_enable_pipes(hdl);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		/* Undo the pre-enable state changes above */
		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
		DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false,
						       __func__, __LINE__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Initialize the tx-comp ring head pointer only on the very first
	 * doorbell access after pipe setup.
	 */
	if (soc->ipa_first_tx_db_access) {
		dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
		soc->ipa_first_tx_db_access = false;
	}

	return QDF_STATUS_SUCCESS;
}
3321 
3322 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3323 				qdf_ipa_wdi_hdl_t hdl)
3324 {
3325 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3326 	struct dp_pdev *pdev =
3327 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3328 	QDF_STATUS result;
3329 	struct dp_ipa_resources *ipa_res;
3330 
3331 	if (!pdev) {
3332 		dp_err("Invalid instance");
3333 		return QDF_STATUS_E_FAILURE;
3334 	}
3335 
3336 	ipa_res = &pdev->ipa_resource;
3337 
3338 	qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
3339 	/*
3340 	 * Reset the tx completion doorbell address before invoking IPA disable
3341 	 * pipes API to ensure that there is no access to IPA tx doorbell
3342 	 * address post disable pipes.
3343 	 */
3344 	DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
3345 
3346 	result = qdf_ipa_wdi_disable_pipes(hdl);
3347 	if (result) {
3348 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3349 			  "%s: Disable WDI PIPE fail, code %d",
3350 			  __func__, result);
3351 		qdf_assert_always(0);
3352 		return QDF_STATUS_E_FAILURE;
3353 	}
3354 
3355 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
3356 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false,
3357 					       __func__, __LINE__);
3358 
3359 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
3360 }
3361 
3362 /**
3363  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
3364  * @client: Client type
3365  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
3366  * @hdl: IPA handle
3367  *
3368  * Return: QDF_STATUS
3369  */
3370 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
3371 				 qdf_ipa_wdi_hdl_t hdl)
3372 {
3373 	qdf_ipa_wdi_perf_profile_t profile;
3374 	QDF_STATUS result;
3375 
3376 	profile.client = client;
3377 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
3378 
3379 	result = qdf_ipa_wdi_set_perf_profile(hdl, &profile);
3380 	if (result) {
3381 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3382 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
3383 			  __func__, result);
3384 		return QDF_STATUS_E_FAILURE;
3385 	}
3386 
3387 	return QDF_STATUS_SUCCESS;
3388 }
3389 
3390 /**
3391  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
3392  * @pdev: pdev
3393  * @vdev: vdev
3394  * @nbuf: skb
3395  *
3396  * Return: nbuf if TX fails and NULL if TX succeeds
3397  */
3398 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
3399 				       struct dp_vdev *vdev,
3400 				       qdf_nbuf_t nbuf)
3401 {
3402 	struct dp_peer *vdev_peer;
3403 	uint16_t len;
3404 
3405 	vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
3406 	if (qdf_unlikely(!vdev_peer))
3407 		return nbuf;
3408 
3409 	if (qdf_unlikely(!vdev_peer->txrx_peer)) {
3410 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3411 		return nbuf;
3412 	}
3413 
3414 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
3415 	len = qdf_nbuf_len(nbuf);
3416 
3417 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
3418 		DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
3419 					      rx.intra_bss.fail, 1, len);
3420 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3421 		return nbuf;
3422 	}
3423 
3424 	DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
3425 				      rx.intra_bss.pkts, 1, len);
3426 	dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3427 	return NULL;
3428 }
3429 
#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_peer_check() - Check for peer for given mac
 * @soc: dp soc object
 * @peer_mac_addr: peer mac address
 * @vdev_id: vdev id
 *
 * Easymesh variant: resolves the mac via the AST hash table while holding
 * soc->ast_lock, then verifies the resolved peer belongs to @vdev_id.
 *
 * Return: true if peer is found, else false
 */
static inline bool dp_ipa_peer_check(struct dp_soc *soc,
				     uint8_t *peer_mac_addr, uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* An entry mid-delete with no pending callback counts as absent */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* Peer ref is taken and dropped under ast_lock on every path */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_IPA);

	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	} else {
		if (peer->vdev->vdev_id == vdev_id) {
			dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
			qdf_spin_unlock_bh(&soc->ast_lock);
			return true;
		}
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
}
#else
/* Non-easymesh variant: direct peer hash lookup by mac and vdev id */
static inline bool dp_ipa_peer_check(struct dp_soc *soc,
				     uint8_t *peer_mac_addr, uint8_t vdev_id)
{
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_hash_find(soc, peer_mac_addr, 0, vdev_id,
				      DP_MOD_ID_IPA);
	if (!peer) {
		return false;
	} else {
		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
		return true;
	}
}
#endif
3487 
/**
 * dp_ipa_rx_intrabss_fwd() - Forward an IPA rx frame within the same BSS
 * @soc_hdl: cdp soc handle
 * @vdev_id: vdev id the frame arrived on
 * @nbuf: received skb (allocated by the IPA driver)
 * @fwd_success: out param, set true if the frame was forwarded
 *
 * Return: true if the frame was consumed here (caller must not pass it up
 *	   to the stack), false to let the caller deliver it to the stack
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* Broadcast/multicast: forward a copy, deliver the original */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frames addressed to the vdev itself are not intra-bss */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Both DA and SA must be known peers on this vdev to forward */
	if (!dp_ipa_peer_check(soc, eh->h_dest, vdev->vdev_id))
		goto out;

	if (!dp_ipa_peer_check(soc, eh->h_source, vdev->vdev_id))
		goto out;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
3562 
#ifdef MDM_PLATFORM
/* Compile-time platform query: true only on MDM builds */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
3574 
3575 /**
3576  * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
3577  * @soc: soc
3578  * @nbuf: source skb
3579  *
3580  * Return: new nbuf if success and otherwise NULL
3581  */
3582 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
3583 					     qdf_nbuf_t nbuf)
3584 {
3585 	uint8_t *src_nbuf_data;
3586 	uint8_t *dst_nbuf_data;
3587 	qdf_nbuf_t dst_nbuf;
3588 	qdf_nbuf_t temp_nbuf = nbuf;
3589 	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
3590 	bool is_nbuf_head = true;
3591 	uint32_t copy_len = 0;
3592 
3593 	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
3594 				  RX_BUFFER_RESERVATION,
3595 				  RX_DATA_BUFFER_ALIGNMENT, FALSE);
3596 
3597 	if (!dst_nbuf) {
3598 		dp_err_rl("nbuf allocate fail");
3599 		return NULL;
3600 	}
3601 
3602 	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
3603 		qdf_nbuf_free(dst_nbuf);
3604 		dp_err_rl("nbuf is jumbo data");
3605 		return NULL;
3606 	}
3607 
3608 	/* prepeare to copy all data into new skb */
3609 	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
3610 	while (temp_nbuf) {
3611 		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
3612 		/* first head nbuf */
3613 		if (is_nbuf_head) {
3614 			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
3615 				     soc->rx_pkt_tlv_size);
3616 			/* leave extra 2 bytes L3_HEADER_PADDING */
3617 			dst_nbuf_data += (soc->rx_pkt_tlv_size +
3618 					  L3_HEADER_PADDING);
3619 			src_nbuf_data += soc->rx_pkt_tlv_size;
3620 			copy_len = qdf_nbuf_headlen(temp_nbuf) -
3621 						soc->rx_pkt_tlv_size;
3622 			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
3623 			is_nbuf_head = false;
3624 		} else {
3625 			copy_len = qdf_nbuf_len(temp_nbuf);
3626 			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
3627 		}
3628 		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
3629 		dst_nbuf_data += copy_len;
3630 	}
3631 
3632 	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
3633 	/* copy is done, free original nbuf */
3634 	qdf_nbuf_free(nbuf);
3635 
3636 	return dst_nbuf;
3637 }
3638 
3639 /**
3640  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
3641  * @soc: soc
3642  * @nbuf: skb
3643  *
3644  * Return: nbuf if success and otherwise NULL
3645  */
3646 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
3647 {
3648 
3649 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
3650 		return nbuf;
3651 
3652 	/* WLAN IPA is run-time disabled */
3653 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
3654 		return nbuf;
3655 
3656 	if (!qdf_nbuf_is_frag(nbuf))
3657 		return nbuf;
3658 
3659 	/* linearize skb for IPA */
3660 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
3661 }
3662 
3663 QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
3664 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3665 	const char *func, uint32_t line)
3666 {
3667 	QDF_STATUS ret;
3668 
3669 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3670 	struct dp_pdev *pdev =
3671 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3672 
3673 	if (!pdev) {
3674 		dp_err("%s invalid instance", __func__);
3675 		return QDF_STATUS_E_FAILURE;
3676 	}
3677 
3678 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
3679 		dp_debug("SMMU S1 disabled");
3680 		return QDF_STATUS_SUCCESS;
3681 	}
3682 	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true, func, line);
3683 	if (ret)
3684 		return ret;
3685 
3686 	ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true, func, line);
3687 	if (ret)
3688 		__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line);
3689 	return ret;
3690 }
3691 
3692 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
3693 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id, const char *func,
3694 	uint32_t line)
3695 {
3696 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3697 	struct dp_pdev *pdev =
3698 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3699 
3700 	if (!pdev) {
3701 		dp_err("%s invalid instance", __func__);
3702 		return QDF_STATUS_E_FAILURE;
3703 	}
3704 
3705 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
3706 		dp_debug("SMMU S1 disabled");
3707 		return QDF_STATUS_SUCCESS;
3708 	}
3709 
3710 	if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line) ||
3711 	    dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false, func, line))
3712 		return QDF_STATUS_E_FAILURE;
3713 
3714 	return QDF_STATUS_SUCCESS;
3715 }
3716 
#ifdef IPA_WDS_EASYMESH_FEATURE
/*
 * AST-create callback from IPA for WDS EasyMesh: look up the
 * transmitting peer by id and run WDS source-port learning on the
 * received frame so an AST entry is created for the source address.
 */
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
			     qdf_ipa_ast_info_type_t *data)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct hal_rx_msdu_metadata msdu_metadata;
	qdf_ipa_ast_info_type_t *ast_info = data;
	struct dp_peer *ta_peer;
	uint8_t *rx_tlv_hdr;

	if (!ast_info) {
		dp_err("Data is NULL !!!");
		return QDF_STATUS_E_FAILURE;
	}

	rx_tlv_hdr = qdf_nbuf_data(ast_info->skb);
	ta_peer = dp_peer_get_ref_by_id(soc, ast_info->ta_peer_id,
					DP_MOD_ID_IPA);
	if (!ta_peer) {
		dp_err("Peer is NULL !!!!");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);

	dp_rx_ipa_wds_srcport_learn(soc, ta_peer, ast_info->skb, msdu_metadata,
				    ast_info->mac_addr_ad4_valid,
				    ast_info->first_msdu_in_mpdu_flag);

	dp_peer_unref_delete(ta_peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}
#endif
3752 
3753 #ifdef QCA_ENHANCED_STATS_SUPPORT
3754 /**
3755  * dp_ipa_update_peer_rx_stats - update peer rx stats
3756  * @soc: soc handle
3757  * @vdev_id: vdev id
3758  * @peer_mac: Peer Mac Address
3759  * @nbuf: data nbuf
3760  *
3761  * Return: status success/failure
3762  */
3763 
3764 QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc,
3765 				       uint8_t vdev_id, uint8_t *peer_mac,
3766 				       qdf_nbuf_t nbuf)
3767 {
3768 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
3769 						      peer_mac, 0, vdev_id,
3770 						      DP_MOD_ID_IPA);
3771 	struct dp_txrx_peer *txrx_peer;
3772 	uint8_t da_is_bcmc;
3773 	qdf_ether_header_t *eh;
3774 
3775 	if (!peer)
3776 		return QDF_STATUS_E_FAILURE;
3777 
3778 	txrx_peer = dp_get_txrx_peer(peer);
3779 
3780 	if (!txrx_peer) {
3781 		dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
3782 		return QDF_STATUS_E_FAILURE;
3783 	}
3784 
3785 	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
3786 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3787 
3788 	if (da_is_bcmc) {
3789 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
3790 					      qdf_nbuf_len(nbuf));
3791 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
3792 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast,
3793 						      1, qdf_nbuf_len(nbuf));
3794 	}
3795 
3796 	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
3797 
3798 	return QDF_STATUS_SUCCESS;
3799 }
3800 
3801 /**
3802  * dp_peer_aggregate_tid_stats - aggregate rx tid stats
3803  * @peer: Data Path peer
3804  *
3805  * Return: void
3806  */
3807 void
3808 dp_peer_aggregate_tid_stats(struct dp_peer *peer)
3809 {
3810 	uint8_t i = 0;
3811 	struct dp_rx_tid *rx_tid = NULL;
3812 	struct cdp_pkt_info rx_total = {0};
3813 	struct dp_txrx_peer *txrx_peer = NULL;
3814 
3815 	if (!peer->rx_tid)
3816 		return;
3817 
3818 	txrx_peer = dp_get_txrx_peer(peer);
3819 
3820 	if (!txrx_peer)
3821 		return;
3822 
3823 	for (i = 0; i < DP_MAX_TIDS; i++) {
3824 		rx_tid = &peer->rx_tid[i];
3825 		rx_total.num += rx_tid->rx_msdu_cnt.num;
3826 		rx_total.bytes += rx_tid->rx_msdu_cnt.bytes;
3827 	}
3828 
3829 	DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.num,
3830 				  rx_total.num);
3831 	DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.bytes,
3832 				  rx_total.bytes);
3833 }
3834 
3835 /**
3836  * dp_ipa_update_vdev_stats(): update vdev stats
3837  * @soc: soc handle
3838  * @srcobj: DP_PEER object
3839  * @arg: point to vdev stats structure
3840  *
3841  * Return: void
3842  */
3843 static inline
3844 void dp_ipa_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
3845 			      void *arg)
3846 {
3847 	dp_peer_aggregate_tid_stats(srcobj);
3848 	dp_update_vdev_stats(soc, srcobj, arg);
3849 }
3850 
3851 /**
3852  * dp_ipa_aggregate_vdev_stats - Aggregate vdev_stats
3853  * @vdev: Data path vdev
3854  * @vdev_stats: buffer to hold vdev stats
3855  *
3856  * Return: void
3857  */
3858 static inline
3859 void dp_ipa_aggregate_vdev_stats(struct dp_vdev *vdev,
3860 				 struct cdp_vdev_stats *vdev_stats)
3861 {
3862 	struct dp_soc *soc = NULL;
3863 
3864 	if (!vdev || !vdev->pdev)
3865 		return;
3866 
3867 	soc = vdev->pdev->soc;
3868 	dp_update_vdev_ingress_stats(vdev);
3869 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
3870 	dp_vdev_iterate_peer(vdev, dp_ipa_update_vdev_stats, vdev_stats,
3871 			     DP_MOD_ID_GENERIC_STATS);
3872 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
3873 
3874 	vdev_stats->tx.ucast.num = vdev_stats->tx.tx_ucast_total.num;
3875 	vdev_stats->tx.ucast.bytes = vdev_stats->tx.tx_ucast_total.bytes;
3876 	vdev_stats->tx.tx_success.num = vdev_stats->tx.tx_ucast_success.num;
3877 	vdev_stats->tx.tx_success.bytes = vdev_stats->tx.tx_ucast_success.bytes;
3878 
3879 	if (vdev_stats->rx.rx_total.num >= vdev_stats->rx.multicast.num)
3880 		vdev_stats->rx.unicast.num = vdev_stats->rx.rx_total.num -
3881 					vdev_stats->rx.multicast.num;
3882 	if (vdev_stats->rx.rx_total.bytes >=  vdev_stats->rx.multicast.bytes)
3883 		vdev_stats->rx.unicast.bytes = vdev_stats->rx.rx_total.bytes -
3884 					vdev_stats->rx.multicast.bytes;
3885 	vdev_stats->rx.to_stack.num = vdev_stats->rx.rx_total.num;
3886 	vdev_stats->rx.to_stack.bytes = vdev_stats->rx.rx_total.bytes;
3887 }
3888 
3889 /**
3890  * dp_ipa_aggregate_pdev_stats - Aggregate pdev stats
3891  * @pdev: Data path pdev
3892  *
3893  * Return: void
3894  */
3895 static inline
3896 void dp_ipa_aggregate_pdev_stats(struct dp_pdev *pdev)
3897 {
3898 	struct dp_vdev *vdev = NULL;
3899 	struct dp_soc *soc;
3900 	struct cdp_vdev_stats *vdev_stats =
3901 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
3902 
3903 	if (!vdev_stats) {
3904 		dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
3905 		       pdev->soc);
3906 		return;
3907 	}
3908 
3909 	soc = pdev->soc;
3910 
3911 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
3912 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
3913 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
3914 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
3915 
3916 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3917 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3918 		dp_ipa_aggregate_vdev_stats(vdev, vdev_stats);
3919 		dp_update_pdev_stats(pdev, vdev_stats);
3920 		dp_update_pdev_ingress_stats(pdev, vdev);
3921 	}
3922 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3923 	qdf_mem_free(vdev_stats);
3924 }
3925 
3926 /**
3927  * dp_ipa_get_peer_stats - Get peer stats
3928  * @peer: Data path peer
3929  * @peer_stats: buffer to hold peer stats
3930  *
3931  * Return: void
3932  */
3933 void dp_ipa_get_peer_stats(struct dp_peer *peer,
3934 			   struct cdp_peer_stats *peer_stats)
3935 {
3936 	dp_peer_aggregate_tid_stats(peer);
3937 	dp_get_peer_stats(peer, peer_stats);
3938 
3939 	peer_stats->tx.tx_success.num =
3940 			peer_stats->tx.tx_ucast_success.num;
3941 	peer_stats->tx.tx_success.bytes =
3942 			peer_stats->tx.tx_ucast_success.bytes;
3943 	peer_stats->tx.ucast.num =
3944 			peer_stats->tx.tx_ucast_total.num;
3945 	peer_stats->tx.ucast.bytes =
3946 			peer_stats->tx.tx_ucast_total.bytes;
3947 
3948 	if (peer_stats->rx.rx_total.num >=  peer_stats->rx.multicast.num)
3949 		peer_stats->rx.unicast.num = peer_stats->rx.rx_total.num -
3950 						peer_stats->rx.multicast.num;
3951 
3952 	if (peer_stats->rx.rx_total.bytes >= peer_stats->rx.multicast.bytes)
3953 		peer_stats->rx.unicast.bytes = peer_stats->rx.rx_total.bytes -
3954 						peer_stats->rx.multicast.bytes;
3955 }
3956 
3957 /**
3958  * dp_ipa_txrx_get_pdev_stats - fetch pdev stats
3959  * @soc: DP soc handle
3960  * @pdev_id: id of DP pdev handle
3961  * @pdev_stats: buffer to hold pdev stats
3962  *
3963  * Return : status success/failure
3964  */
3965 QDF_STATUS
3966 dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
3967 			   struct cdp_pdev_stats *pdev_stats)
3968 {
3969 	struct dp_pdev *pdev =
3970 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
3971 						   pdev_id);
3972 	if (!pdev)
3973 		return QDF_STATUS_E_FAILURE;
3974 
3975 	dp_ipa_aggregate_pdev_stats(pdev);
3976 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
3977 
3978 	return QDF_STATUS_SUCCESS;
3979 }
3980 
3981 /**
3982  * dp_ipa_txrx_get_vdev_stats - fetch vdev stats
3983  * @soc_hdl: soc handle
3984  * @vdev_id: id of vdev handle
3985  * @buf: buffer to hold vdev stats
3986  * @is_aggregate: for aggregation
3987  *
3988  * Return : int
3989  */
3990 int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3991 			       void *buf, bool is_aggregate)
3992 {
3993 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3994 	struct cdp_vdev_stats *vdev_stats;
3995 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3996 						     DP_MOD_ID_IPA);
3997 
3998 	if (!vdev)
3999 		return 1;
4000 
4001 	vdev_stats = (struct cdp_vdev_stats *)buf;
4002 	dp_ipa_aggregate_vdev_stats(vdev, buf);
4003 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
4004 
4005 	return 0;
4006 }
4007 
4008 /**
4009  * dp_ipa_txrx_get_peer_stats - fetch peer stats
4010  * @soc: soc handle
4011  * @vdev_id: id of vdev handle
4012  * @peer_mac: peer mac address
4013  * @peer_stats: buffer to hold peer stats
4014  *
4015  * Return : status success/failure
4016  */
4017 QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
4018 				      uint8_t *peer_mac,
4019 				      struct cdp_peer_stats *peer_stats)
4020 {
4021 	struct dp_peer *peer = NULL;
4022 	struct cdp_peer_info peer_info = { 0 };
4023 
4024 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
4025 				 CDP_WILD_PEER_TYPE);
4026 
4027 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
4028 					 DP_MOD_ID_IPA);
4029 
4030 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
4031 
4032 	if (!peer)
4033 		return QDF_STATUS_E_FAILURE;
4034 
4035 	dp_ipa_get_peer_stats(peer, peer_stats);
4036 	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
4037 
4038 	return QDF_STATUS_SUCCESS;
4039 }
4040 #endif
4041 #endif
4042