xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision aeb2ffde14d914a2adf90754989f139d4c934d4c)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifdef IPA_OFFLOAD
19 
20 #include <qdf_ipa_wdi3.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <hal_hw_headers.h>
24 #include <hal_api.h>
25 #include <hal_reo.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_htt.h"
32 #include "dp_tx.h"
33 #include "dp_rx.h"
34 #include "dp_ipa.h"
35 #include "dp_internal.h"
36 #ifdef WIFI_MONITOR_SUPPORT
37 #include "dp_mon.h"
38 #endif
39 
40 /* Ring index for WBM2SW2 release ring */
41 #define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX
42 
43 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
44 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
45 
46 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
47  * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
48  * This causes back pressure, resulting in a FW crash.
49  * By leaving some entries with no buffer attached, WBM will be able to write
50  * to the ring, and from dumps we can figure out the buffer which is causing
51  * this issue.
52  */
53 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: qdf log timestamp at which this remap was recorded
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
66 
67 #define REO_REMAP_HISTORY_SIZE 32
68 
69 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
70 
71 static qdf_atomic_t dp_ipa_reo_remap_history_index;
/* Atomically claim the next slot in the remap history ring.
 * The shared index is wound back by REO_REMAP_HISTORY_SIZE once it hits
 * the boundary so it never grows without bound; the modulo below folds
 * the claimed value into [0, REO_REMAP_HISTORY_SIZE).
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
81 
82 /**
83  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
84  * @ix0_val: reo destination ring IX0 value
85  * @ix2_val: reo destination ring IX2 value
86  * @ix3_val: reo destination ring IX3 value
87  *
88  * Return: None
89  */
90 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
91 					 uint32_t ix3_val)
92 {
93 	int idx = dp_ipa_reo_remap_record_index_next(
94 				&dp_ipa_reo_remap_history_index);
95 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
96 
97 	record->timestamp = qdf_get_log_timestamp();
98 	record->ix0_reg = ix0_val;
99 	record->ix2_reg = ix2_val;
100 	record->ix3_reg = ix3_val;
101 }
102 
/**
 * __dp_ipa_handle_buf_smmu_mapping() - Create/release one IPA SMMU mapping
 * @soc: data path SoC handle
 * @nbuf: network buffer whose frag-0 physical address is (un)mapped
 * @size: length in bytes of the region to map/unmap
 * @create: true to create the mapping, false to release it
 *
 * Builds a single-entry mem map table from the nbuf's frag-0 physical
 * address and passes it to the IPA WDI create/release SMMU mapping API.
 * Mapping failures are treated as fatal (qdf_assert_always) rather than
 * being propagated to the caller.
 *
 * Return: QDF_STATUS_E_INVAL if the IPA handle is invalid, otherwise the
 *	   status returned by the IPA WDI mapping call.
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	qdf_ipa_wdi_hdl_t hdl;

	/* Need to handle the case when one soc will
	 * have multiple pdev(radio's), Currently passing
	 * pdev_id as 0 assuming 1 soc has only 1 radio.
	 */
	hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0);
	if (hdl == DP_IPA_HDL_INVALID) {
		dp_err("IPA handle is invalid");
		return QDF_STATUS_E_INVAL;
	}
	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);

		ret = qdf_ipa_wdi_create_smmu_mapping(hdl, 1,
						      &mem_map_table);
	} else {
		ret = qdf_ipa_wdi_release_smmu_mapping(hdl, 1,
						       &mem_map_table);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
148 
/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - Create/release IPA SMMU mapping
 *					 for one RX nbuf
 * @soc: data path SoC handle
 * @nbuf: RX network buffer
 * @size: length in bytes of the region to map/unmap
 * @create: true to map, false to unmap
 *
 * The operation is skipped (reported as success) when any pdev has
 * monitor mode configured, when IPA is disabled in cfg, or when SMMU S1
 * translation is off. Duplicate map/unmap requests are only counted in
 * stats and rejected with QDF_STATUS_E_INVAL.
 *
 * Return: QDF status
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create)
{
	struct dp_pdev *pdev;
	int i;

	/* No IPA smmu handling when any radio runs in monitor mode */
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && dp_monitor_is_configured(pdev))
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if ipa pipes is disabled, but if it's unmap
	 * operation and nbuf has done ipa smmu map before,
	 * do ipa smmu unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	/* Reject double map / double unmap of the same nbuf */
	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
}
193 
194 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
195 	struct dp_soc *soc,
196 	struct dp_pdev *pdev,
197 	bool create)
198 {
199 	uint32_t index;
200 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
201 	uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
202 	qdf_nbuf_t nbuf;
203 	uint32_t buf_len;
204 
205 	if (!ipa_is_ready()) {
206 		dp_info("IPA is not READY");
207 		return 0;
208 	}
209 
210 	for (index = 0; index < tx_buffer_cnt; index++) {
211 		nbuf = (qdf_nbuf_t)
212 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
213 		if (!nbuf)
214 			continue;
215 		buf_len = qdf_nbuf_get_data_len(nbuf);
216 		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
217 						       create);
218 	}
219 
220 	return ret;
221 }
222 
223 #ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/* Publish the "SMMU mapping lock required" flag for every REO destination
 * ring. Each ring's SRNG lock is taken while writing the per-ring flag so
 * that concurrent ring users observe a consistent value.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
	hal_ring_handle_t hal_ring_hdl;
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
		hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng;
		hal_srng_lock(hal_ring_hdl);
		soc->ipa_reo_ctx_lock_required[ring] = lock_required;
		hal_srng_unlock(hal_ring_hdl);
	}
}
237 #else
/* Lock-less SRNG access build: no per-ring lock flag to publish */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
}
242 
243 #endif
244 
245 #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - Map/unmap the whole RX buffer
 *					      pool in the IPA SMMU
 * @soc: data path SoC handle
 * @pdev: physical device instance
 * @create: true to create mappings, false to release them
 *
 * Multi-page RX descriptor pool variant: descriptors are looked up page by
 * page via dp_rx_desc_find(). Only in-use, still-mapped descriptors are
 * processed, and duplicate map/unmap requests are counted and skipped.
 * The pool lock and the IPA RX SMMU mapping lock are held for the whole
 * walk; REO rings are flagged as lock-required around it.
 *
 * Return: QDF status of the last mapping operation
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Pool pages freed underneath us: stop walking */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* Skip (and count) duplicate map/unmap requests */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		ret = __dp_ipa_handle_buf_smmu_mapping(
				soc, nbuf, rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return ret;
}
306 #else
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - Map/unmap the whole RX buffer
 *					      pool in the IPA SMMU
 * @soc: data path SoC handle
 * @pdev: physical device instance
 * @create: true to create mappings, false to release them
 *
 * Flat-array RX descriptor pool variant (no multi-page allocation).
 * Per-buffer mapping status is intentionally not propagated here; only
 * in-use, still-mapped descriptors are processed, and duplicate requests
 * are counted and skipped. Locking mirrors the multi-page variant.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	qdf_nbuf_t nbuf;
	int i;

	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	for (i = 0; i < rx_pool->pool_size; i++) {
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
			continue;

		nbuf = rx_pool->array[i].rx_desc.nbuf;

		/* Skip (and count) duplicate map/unmap requests */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return QDF_STATUS_SUCCESS;
}
357 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
358 
359 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
360 					     qdf_shared_mem_t *shared_mem,
361 					     void *cpu_addr,
362 					     qdf_dma_addr_t dma_addr,
363 					     uint32_t size)
364 {
365 	qdf_dma_addr_t paddr;
366 	int ret;
367 
368 	shared_mem->vaddr = cpu_addr;
369 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
370 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
371 
372 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
373 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
374 
375 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
376 				      shared_mem->vaddr, dma_addr, size);
377 	if (ret) {
378 		dp_err("Unable to get DMA sgtable");
379 		return QDF_STATUS_E_NOMEM;
380 	}
381 
382 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
383 
384 	return QDF_STATUS_SUCCESS;
385 }
386 
387 #ifdef IPA_WDI3_TX_TWO_PIPES
388 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
389 {
390 	struct dp_ipa_resources *ipa_res;
391 	qdf_nbuf_t nbuf;
392 	int idx;
393 
394 	for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
395 		nbuf = (qdf_nbuf_t)
396 			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
397 		if (!nbuf)
398 			continue;
399 
400 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
401 		qdf_mem_dp_tx_skb_cnt_dec();
402 		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
403 		qdf_nbuf_free(nbuf);
404 		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
405 						(void *)NULL;
406 	}
407 
408 	qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
409 	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
410 
411 	ipa_res = &pdev->ipa_resource;
412 	if (!ipa_res->is_db_ddr_mapped)
413 		iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);
414 
415 	qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
416 	qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
417 }
418 
/**
 * dp_ipa_tx_alt_pool_attach() - Allocate and post the alternate IPA TX pool
 * @soc: data path SoC handle
 *
 * Allocates nbufs and posts them into the alternate WBM2SW completion ring
 * to simulate the H/W source-ring initial fill, updating HP as it goes.
 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES entries are deliberately left
 * empty (see the WAR comment at the top of this file).
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on failure
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t wbm_sw0_bm_id = soc->wbm_sw0_bm_id;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	/* NOTE(review): the "- 1" leaves one entry unused on top of the WAR
	 * entries — presumably to keep HP from catching TP; confirm intent.
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0,
					     HAL_WBM_SW4_BM_ID(wbm_sw0_bm_id));

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
523 
524 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
525 {
526 	struct dp_soc *soc = pdev->soc;
527 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
528 
529 	ipa_res->tx_alt_ring_num_alloc_buffer =
530 		(uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;
531 
532 	dp_ipa_get_shared_mem_info(
533 			soc->osdev, &ipa_res->tx_alt_ring,
534 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
535 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
536 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
537 
538 	dp_ipa_get_shared_mem_info(
539 			soc->osdev, &ipa_res->tx_alt_comp_ring,
540 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
541 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
542 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
543 
544 	if (!qdf_mem_get_dma_addr(soc->osdev,
545 				  &ipa_res->tx_alt_comp_ring.mem_info))
546 		return QDF_STATUS_E_FAILURE;
547 
548 	return QDF_STATUS_SUCCESS;
549 }
550 
551 static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
552 {
553 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
554 	struct hal_srng *hal_srng;
555 	struct hal_srng_params srng_params;
556 	unsigned long addr_offset, dev_base_paddr;
557 
558 	/* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
559 	hal_srng = (struct hal_srng *)
560 		soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
561 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
562 			    hal_srng_to_hal_ring_handle(hal_srng),
563 			    &srng_params);
564 
565 	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
566 		srng_params.ring_base_paddr;
567 	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
568 		srng_params.ring_base_vaddr;
569 	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
570 		(srng_params.num_entries * srng_params.entry_size) << 2;
571 	/*
572 	 * For the register backed memory addresses, use the scn->mem_pa to
573 	 * calculate the physical address of the shadow registers
574 	 */
575 	dev_base_paddr =
576 		(unsigned long)
577 		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
578 	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
579 		      (unsigned long)(hal_soc->dev_base_addr);
580 	soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
581 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
582 
583 	dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
584 		(unsigned int)addr_offset,
585 		(unsigned int)dev_base_paddr,
586 		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
587 		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
588 		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
589 		srng_params.num_entries,
590 		soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
591 
592 	/* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
593 	hal_srng = (struct hal_srng *)
594 		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
595 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
596 			    hal_srng_to_hal_ring_handle(hal_srng),
597 			    &srng_params);
598 
599 	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
600 						srng_params.ring_base_paddr;
601 	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
602 						srng_params.ring_base_vaddr;
603 	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
604 		(srng_params.num_entries * srng_params.entry_size) << 2;
605 	soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
606 		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
607 				     hal_srng_to_hal_ring_handle(hal_srng));
608 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
609 		      (unsigned long)(hal_soc->dev_base_addr);
610 	soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
611 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
612 
613 	dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
614 		(unsigned int)addr_offset,
615 		(unsigned int)dev_base_paddr,
616 		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
617 		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
618 		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
619 		srng_params.num_entries,
620 		soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
621 }
622 
/**
 * dp_ipa_map_ring_doorbell_paddr() - Map IPA doorbell registers for SW use
 * @pdev: physical device instance
 *
 * Obtains CPU virtual addresses for the TX completion (and, when present,
 * alternate TX completion) doorbell registers — via phys_to_virt() when
 * the doorbell lives in DDR, otherwise via a 4-byte ioremap(). When SMMU
 * S1 is enabled, additionally maps the doorbell physical addresses through
 * the SMMU and overwrites the stored paddr fields with the IOVA values.
 * Mapping failures are fatal (qdf_assert_always).
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* paddr now holds the SMMU IOVA, not the original PA */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
674 
/**
 * dp_ipa_unmap_ring_doorbell_paddr() - Release SMMU doorbell mappings
 * @pdev: physical device instance
 *
 * Undoes the pld_smmu_map() calls done in dp_ipa_map_ring_doorbell_paddr()
 * in strict reverse order. No-op when SMMU S1 is not enabled. Unmap
 * failures are fatal (qdf_assert_always).
 */
static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	/* Unmap must be in reverse order of map */
	if (ipa_res->tx_alt_comp_doorbell_paddr) {
		ret = pld_smmu_unmap(soc->osdev->dev,
				     ipa_res->tx_alt_comp_doorbell_paddr,
				     sizeof(uint32_t));
		qdf_assert_always(!ret);
	}

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->tx_comp_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}
702 
703 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
704 						 struct dp_pdev *pdev,
705 						 bool create)
706 {
707 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
708 	struct ipa_dp_tx_rsc *rsc;
709 	uint32_t tx_buffer_cnt;
710 	uint32_t buf_len;
711 	qdf_nbuf_t nbuf;
712 	uint32_t index;
713 
714 	if (!ipa_is_ready()) {
715 		dp_info("IPA is not READY");
716 		return QDF_STATUS_SUCCESS;
717 	}
718 
719 	rsc = &soc->ipa_uc_tx_rsc_alt;
720 	tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
721 
722 	for (index = 0; index < tx_buffer_cnt; index++) {
723 		nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
724 		if (!nbuf)
725 			continue;
726 
727 		buf_len = qdf_nbuf_get_data_len(nbuf);
728 		ret = __dp_ipa_handle_buf_smmu_mapping(
729 				soc, nbuf, buf_len, create);
730 	}
731 
732 	return ret;
733 }
734 
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - Fill non-SMMU setup info for alt TX pipe
 * @soc: data path SoC handle
 * @ipa_res: DP IPA resources for this pdev
 * @tx: IPA WDI pipe setup info to populate
 *
 * Programs transfer/event ring addresses, doorbell register addresses and
 * buffer counts for the alternate TX pipe (IPA_CLIENT_WLAN2_CONS1), then
 * preprograms the TCL descriptor template used by IPA for TX.
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_alt_ring_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
				HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1;	/* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0;	/* padding for alignment */
}
788 
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - Fill SMMU setup info for alt TX pipe
 * @soc: data path SoC handle
 * @ipa_res: DP IPA resources for this pdev
 * @tx_smmu: IPA WDI SMMU pipe setup info to populate
 *
 * SMMU S1 variant of dp_ipa_wdi_tx_alt_pipe_params(): ring bases are
 * passed as scatter-gather tables rather than physical addresses; the
 * rest of the programming (doorbells, buffer count, TCL descriptor
 * template) mirrors the non-SMMU path.
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
					HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1;	/* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0;	/* padding for alignment */
}
840 
841 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
842 				     struct dp_ipa_resources *res,
843 				     qdf_ipa_wdi_conn_in_params_t *in)
844 {
845 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
846 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
847 	qdf_ipa_ep_cfg_t *tx_cfg;
848 
849 	QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;
850 
851 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
852 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
853 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
854 		dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
855 	} else {
856 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
857 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx);
858 		dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
859 	}
860 
861 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
862 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
863 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
864 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
865 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
866 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
867 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
868 }
869 
/**
 * dp_ipa_set_pipe_db() - Copy doorbell addresses from IPA connect output
 * @res: DP IPA resources to update
 * @out: IPA WDI connection out-params returned by the IPA driver
 *
 * Records the TX, RX and alternate-TX doorbell physical addresses that
 * the IPA driver allocated during pipe connect.
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
	res->tx_alt_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
}
880 
/**
 * dp_ipa_setup_iface_session_id() - Program session id into WDI intf params
 * @in: IPA WDI interface registration in-params
 * @session_id: encoded session id; low bit carries the 2 GHz flag
 *
 * NOTE(review): IPA_SESSION_ID_SHIFT is used both as a bit mask (for the
 * 2 GHz flag) and as a shift amount here — this only works if its value
 * is 1; confirm against the macro definition.
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;

	session_id = session_id >> IPA_SESSION_ID_SHIFT;
	dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface);

	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
}
892 
893 static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
894 					struct dp_ipa_resources *res)
895 {
896 	struct hal_srng *wbm_srng;
897 
898 	/* Init first TX comp ring */
899 	wbm_srng = (struct hal_srng *)
900 		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
901 
902 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
903 			     res->tx_comp_doorbell_vaddr);
904 
905 	/* Init the alternate TX comp ring */
906 	wbm_srng = (struct hal_srng *)
907 		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
908 
909 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
910 			     res->tx_alt_comp_doorbell_vaddr);
911 }
912 
913 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
914 					 struct dp_ipa_resources *ipa_res)
915 {
916 	struct hal_srng *wbm_srng;
917 
918 	wbm_srng = (struct hal_srng *)
919 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
920 
921 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
922 					  ipa_res->tx_comp_doorbell_paddr);
923 
924 	dp_info("paddr %pK vaddr %pK",
925 		(void *)ipa_res->tx_comp_doorbell_paddr,
926 		(void *)ipa_res->tx_comp_doorbell_vaddr);
927 
928 	/* Setup for alternative TX comp ring */
929 	wbm_srng = (struct hal_srng *)
930 			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
931 
932 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
933 					  ipa_res->tx_alt_comp_doorbell_paddr);
934 
935 	dp_info("paddr %pK vaddr %pK",
936 		(void *)ipa_res->tx_alt_comp_doorbell_paddr,
937 		(void *)ipa_res->tx_alt_comp_doorbell_vaddr);
938 }
939 
940 #ifdef IPA_SET_RESET_TX_DB_PA
941 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
942 					      struct dp_ipa_resources *ipa_res)
943 {
944 	hal_ring_handle_t wbm_srng;
945 	qdf_dma_addr_t hp_addr;
946 
947 	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
948 	if (!wbm_srng)
949 		return QDF_STATUS_E_FAILURE;
950 
951 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
952 
953 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
954 
955 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
956 
957 	/* Reset alternative TX comp ring */
958 	wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
959 	if (!wbm_srng)
960 		return QDF_STATUS_E_FAILURE;
961 
962 	hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;
963 
964 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
965 
966 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
967 
968 	return QDF_STATUS_SUCCESS;
969 }
970 #endif /* IPA_SET_RESET_TX_DB_PA */
971 
972 #else /* !IPA_WDI3_TX_TWO_PIPES */
973 
/* No-op: no second TX pipe pool when IPA_WDI3_TX_TWO_PIPES is disabled */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
978 
/* No-op: no alternate TX ring when IPA_WDI3_TX_TWO_PIPES is disabled */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
982 
/* No-op success: no second TX pipe pool when IPA_WDI3_TX_TWO_PIPES is off */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	return 0;
}
987 
/* No-op success: no alternate TX ring when IPA_WDI3_TX_TWO_PIPES is off */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
992 
/**
 * dp_ipa_map_ring_doorbell_paddr() - Map doorbell registers for CPU/SMMU use
 * @pdev: data path pdev handle
 *
 * Obtains a CPU virtual address for the TX completion doorbell (direct
 * phys_to_virt when the doorbell lives in DDR, ioremap otherwise) and,
 * when SMMU stage-1 is enabled, replaces the TX comp and RX ready
 * doorbell physical addresses with their IOVA mappings.
 *
 * NOTE(review): the ioremap() return value is not checked — a NULL
 * tx_comp_doorbell_vaddr would only surface later. Also, the mapped
 * dmaaddr is stored before qdf_assert_always() checks the pld_smmu_map()
 * result; this is harmless only because the assert is unconditional.
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	/* DDR-resident doorbells are directly addressable; MMIO needs ioremap */
	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		/* Replace physical doorbell addresses with SMMU IOVAs */
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
1024 
1025 static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
1026 {
1027 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1028 	struct dp_soc *soc = pdev->soc;
1029 	int ret = 0;
1030 
1031 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
1032 		return;
1033 
1034 	ret = pld_smmu_unmap(soc->osdev->dev,
1035 			     ipa_res->rx_ready_doorbell_paddr,
1036 			     sizeof(uint32_t));
1037 	qdf_assert_always(!ret);
1038 
1039 	ret = pld_smmu_unmap(soc->osdev->dev,
1040 			     ipa_res->tx_comp_doorbell_paddr,
1041 			     sizeof(uint32_t));
1042 	qdf_assert_always(!ret);
1043 }
1044 
/* No-op success: no alternate TX buffers when IPA_WDI3_TX_TWO_PIPES is off */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create)
{
	return QDF_STATUS_SUCCESS;
}
1051 
/* No-op: alternate TX pipe setup only exists with IPA_WDI3_TX_TWO_PIPES */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}
1057 
1058 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
1059 			       qdf_ipa_wdi_conn_out_params_t *out)
1060 {
1061 	res->tx_comp_doorbell_paddr =
1062 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
1063 	res->rx_ready_doorbell_paddr =
1064 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
1065 }
1066 
/**
 * dp_ipa_setup_iface_session_id() - Set interface meta data from session id
 * @in: IPA WDI interface registration input parameters
 * @session_id: session id, carried in the upper 16 bits of the meta data
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
1072 
1073 static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
1074 					       struct dp_ipa_resources *res)
1075 {
1076 	struct hal_srng *wbm_srng = (struct hal_srng *)
1077 		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1078 
1079 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
1080 			     res->tx_comp_doorbell_vaddr);
1081 }
1082 
1083 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
1084 					 struct dp_ipa_resources *ipa_res)
1085 {
1086 	struct hal_srng *wbm_srng = (struct hal_srng *)
1087 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1088 
1089 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
1090 					  ipa_res->tx_comp_doorbell_paddr);
1091 
1092 	dp_info("paddr %pK vaddr %pK",
1093 		(void *)ipa_res->tx_comp_doorbell_paddr,
1094 		(void *)ipa_res->tx_comp_doorbell_vaddr);
1095 }
1096 
1097 #ifdef IPA_SET_RESET_TX_DB_PA
1098 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
1099 					      struct dp_ipa_resources *ipa_res)
1100 {
1101 	hal_ring_handle_t wbm_srng =
1102 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1103 	qdf_dma_addr_t hp_addr;
1104 
1105 	if (!wbm_srng)
1106 		return QDF_STATUS_E_FAILURE;
1107 
1108 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
1109 
1110 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
1111 
1112 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
1113 
1114 	return QDF_STATUS_SUCCESS;
1115 }
1116 #endif /* IPA_SET_RESET_TX_DB_PA */
1117 
1118 #endif /* IPA_WDI3_TX_TWO_PIPES */
1119 
1120 /**
1121  * dp_tx_ipa_uc_detach - Free autonomy TX resources
1122  * @soc: data path instance
1123  * @pdev: core txrx pdev context
1124  *
1125  * Free allocated TX buffers with WBM SRNG
1126  *
1127  * Return: none
1128  */
1129 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1130 {
1131 	int idx;
1132 	qdf_nbuf_t nbuf;
1133 	struct dp_ipa_resources *ipa_res;
1134 
1135 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1136 		nbuf = (qdf_nbuf_t)
1137 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
1138 		if (!nbuf)
1139 			continue;
1140 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
1141 		qdf_mem_dp_tx_skb_cnt_dec();
1142 		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
1143 		qdf_nbuf_free(nbuf);
1144 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
1145 						(void *)NULL;
1146 	}
1147 
1148 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1149 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1150 
1151 	ipa_res = &pdev->ipa_resource;
1152 
1153 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
1154 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
1155 }
1156 
1157 /**
1158  * dp_rx_ipa_uc_detach - free autonomy RX resources
1159  * @soc: data path instance
1160  * @pdev: core txrx pdev context
1161  *
1162  * This function will detach DP RX into main device context
1163  * will free DP Rx resources.
1164  *
1165  * Return: none
1166  */
1167 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1168 {
1169 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1170 
1171 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
1172 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
1173 }
1174 
/**
 * dp_ipa_uc_detach() - Free all IPA uC TX/RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * No-op when IPA is disabled in config.
 *
 * Return: QDF_STATUS_SUCCESS
 */
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* Cleanup 2nd TX pipe resources */
	dp_ipa_tx_alt_pool_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}
1191 
1192 /**
1193  * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
1194  * @soc: data path instance
1195  * @pdev: Physical device handle
1196  *
1197  * Allocate TX buffer from non-cacheable memory
1198  * Attache allocated TX buffers with WBM SRNG
1199  *
1200  * Return: int
1201  */
1202 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1203 {
1204 	uint32_t tx_buffer_count;
1205 	uint32_t ring_base_align = 8;
1206 	qdf_dma_addr_t buffer_paddr;
1207 	struct hal_srng *wbm_srng = (struct hal_srng *)
1208 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1209 	struct hal_srng_params srng_params;
1210 	void *ring_entry;
1211 	int num_entries;
1212 	qdf_nbuf_t nbuf;
1213 	int retval = QDF_STATUS_SUCCESS;
1214 	int max_alloc_count = 0;
1215 
1216 	/*
1217 	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
1218 	 * unsigned int uc_tx_buf_sz =
1219 	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
1220 	 */
1221 	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
1222 	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
1223 
1224 	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
1225 			    &srng_params);
1226 	num_entries = srng_params.num_entries;
1227 
1228 	max_alloc_count =
1229 		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
1230 	if (max_alloc_count <= 0) {
1231 		dp_err("incorrect value for buffer count %u", max_alloc_count);
1232 		return -EINVAL;
1233 	}
1234 
1235 	dp_info("requested %d buffers to be posted to wbm ring",
1236 		max_alloc_count);
1237 
1238 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
1239 		qdf_mem_malloc(num_entries *
1240 		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
1241 	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
1242 		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
1243 		return -ENOMEM;
1244 	}
1245 
1246 	hal_srng_access_start_unlocked(soc->hal_soc,
1247 				       hal_srng_to_hal_ring_handle(wbm_srng));
1248 
1249 	/*
1250 	 * Allocate Tx buffers as many as possible.
1251 	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
1252 	 * Populate Tx buffers into WBM2IPA ring
1253 	 * This initial buffer population will simulate H/W as source ring,
1254 	 * and update HP
1255 	 */
1256 	for (tx_buffer_count = 0;
1257 		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
1258 		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
1259 		if (!nbuf)
1260 			break;
1261 
1262 		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
1263 				hal_srng_to_hal_ring_handle(wbm_srng));
1264 		if (!ring_entry) {
1265 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1266 				  "%s: Failed to get WBM ring entry",
1267 				  __func__);
1268 			qdf_nbuf_free(nbuf);
1269 			break;
1270 		}
1271 
1272 		qdf_nbuf_map_single(soc->osdev, nbuf,
1273 				    QDF_DMA_BIDIRECTIONAL);
1274 		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1275 		qdf_mem_dp_tx_skb_cnt_inc();
1276 		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));
1277 
1278 		/*
1279 		 * TODO - KIWI code can directly call the be handler
1280 		 * instead of hal soc ops.
1281 		 */
1282 		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
1283 					     buffer_paddr, 0,
1284 					     (IPA_TCL_DATA_RING_IDX +
1285 					      soc->wbm_sw0_bm_id));
1286 
1287 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
1288 			= (void *)nbuf;
1289 	}
1290 
1291 	hal_srng_access_end_unlocked(soc->hal_soc,
1292 				     hal_srng_to_hal_ring_handle(wbm_srng));
1293 
1294 	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
1295 
1296 	if (tx_buffer_count) {
1297 		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
1298 	} else {
1299 		dp_err("No IPA WDI TX buffer allocated!");
1300 		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1301 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1302 		retval = -ENOMEM;
1303 	}
1304 
1305 	return retval;
1306 }
1307 
1308 /**
1309  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
1310  * @soc: data path instance
1311  * @pdev: core txrx pdev context
1312  *
1313  * This function will attach a DP RX instance into the main
1314  * device (SOC) context.
1315  *
1316  * Return: QDF_STATUS_SUCCESS: success
1317  *         QDF_STATUS_E_RESOURCES: Error return
1318  */
1319 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1320 {
1321 	return QDF_STATUS_SUCCESS;
1322 }
1323 
/**
 * dp_ipa_uc_attach() - Allocate all IPA uC TX/RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Attaches TX, alternate TX pool and RX resources in order, unwinding
 * the already-attached stages on failure. No-op when IPA is disabled.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code from the failing stage
 */
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int error;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource attach */
	error = dp_tx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC TX attach fail code %d",
			  __func__, error);
		return error;
	}

	/* Setup 2nd TX pipe */
	error = dp_ipa_tx_alt_pool_attach(soc);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA TX pool2 attach fail code %d",
			  __func__, error);
		/* Unwind the TX attach done above */
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	/* RX resource attach */
	error = dp_rx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC RX attach fail code %d",
			  __func__, error);
		/* Unwind both TX stages in reverse order */
		dp_ipa_tx_alt_pool_detach(soc, pdev);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	return QDF_STATUS_SUCCESS;	/* success */
}
1363 
1364 /*
1365  * dp_ipa_ring_resource_setup() - setup IPA ring resources
1366  * @soc: data path SoC handle
1367  *
1368  * Return: none
1369  */
1370 int dp_ipa_ring_resource_setup(struct dp_soc *soc,
1371 		struct dp_pdev *pdev)
1372 {
1373 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
1374 	struct hal_srng *hal_srng;
1375 	struct hal_srng_params srng_params;
1376 	qdf_dma_addr_t hp_addr;
1377 	unsigned long addr_offset, dev_base_paddr;
1378 	uint32_t ix0;
1379 	uint8_t ix0_map[8];
1380 
1381 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1382 		return QDF_STATUS_SUCCESS;
1383 
1384 	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
1385 	hal_srng = (struct hal_srng *)
1386 			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
1387 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1388 			    hal_srng_to_hal_ring_handle(hal_srng),
1389 			    &srng_params);
1390 
1391 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
1392 		srng_params.ring_base_paddr;
1393 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
1394 		srng_params.ring_base_vaddr;
1395 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
1396 		(srng_params.num_entries * srng_params.entry_size) << 2;
1397 	/*
1398 	 * For the register backed memory addresses, use the scn->mem_pa to
1399 	 * calculate the physical address of the shadow registers
1400 	 */
1401 	dev_base_paddr =
1402 		(unsigned long)
1403 		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
1404 	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
1405 		      (unsigned long)(hal_soc->dev_base_addr);
1406 	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
1407 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1408 
1409 	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1410 		(unsigned int)addr_offset,
1411 		(unsigned int)dev_base_paddr,
1412 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
1413 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
1414 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
1415 		srng_params.num_entries,
1416 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
1417 
1418 	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
1419 	hal_srng = (struct hal_srng *)
1420 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1421 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1422 			    hal_srng_to_hal_ring_handle(hal_srng),
1423 			    &srng_params);
1424 
1425 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
1426 						srng_params.ring_base_paddr;
1427 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
1428 						srng_params.ring_base_vaddr;
1429 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
1430 		(srng_params.num_entries * srng_params.entry_size) << 2;
1431 	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
1432 		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
1433 				     hal_srng_to_hal_ring_handle(hal_srng));
1434 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
1435 		      (unsigned long)(hal_soc->dev_base_addr);
1436 	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
1437 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1438 
1439 	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
1440 		(unsigned int)addr_offset,
1441 		(unsigned int)dev_base_paddr,
1442 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
1443 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
1444 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
1445 		srng_params.num_entries,
1446 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
1447 
1448 	dp_ipa_tx_alt_ring_resource_setup(soc);
1449 
1450 	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
1451 	hal_srng = (struct hal_srng *)
1452 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
1453 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1454 			    hal_srng_to_hal_ring_handle(hal_srng),
1455 			    &srng_params);
1456 
1457 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
1458 						srng_params.ring_base_paddr;
1459 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
1460 						srng_params.ring_base_vaddr;
1461 	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
1462 		(srng_params.num_entries * srng_params.entry_size) << 2;
1463 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
1464 		      (unsigned long)(hal_soc->dev_base_addr);
1465 	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
1466 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1467 
1468 	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1469 		(unsigned int)addr_offset,
1470 		(unsigned int)dev_base_paddr,
1471 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
1472 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
1473 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
1474 		srng_params.num_entries,
1475 		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
1476 
1477 	hal_srng = (struct hal_srng *)
1478 			pdev->rx_refill_buf_ring2.hal_srng;
1479 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1480 			    hal_srng_to_hal_ring_handle(hal_srng),
1481 			    &srng_params);
1482 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
1483 		srng_params.ring_base_paddr;
1484 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
1485 		srng_params.ring_base_vaddr;
1486 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
1487 		(srng_params.num_entries * srng_params.entry_size) << 2;
1488 	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
1489 				       hal_srng_to_hal_ring_handle(hal_srng));
1490 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
1491 		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);
1492 
1493 	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1494 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
1495 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
1496 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
1497 		srng_params.num_entries,
1498 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
1499 
1500 	/*
1501 	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
1502 	 * DESTINATION_RING_CTRL_IX_0.
1503 	 */
1504 	ix0_map[0] = REO_REMAP_SW1;
1505 	ix0_map[1] = REO_REMAP_SW1;
1506 	ix0_map[2] = REO_REMAP_SW2;
1507 	ix0_map[3] = REO_REMAP_SW3;
1508 	ix0_map[4] = REO_REMAP_SW2;
1509 	ix0_map[5] = REO_REMAP_RELEASE;
1510 	ix0_map[6] = REO_REMAP_FW;
1511 	ix0_map[7] = REO_REMAP_FW;
1512 
1513 	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
1514 				    ix0_map);
1515 
1516 	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);
1517 
1518 	return 0;
1519 }
1520 
/**
 * dp_ipa_get_resource() - Export TX/RX ring shared-memory info to IPA layer
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Fills pdev->ipa_resource with shared-memory descriptors for the TX,
 * TX completion, RX ready and RX refill rings, and the TX buffer count.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the pdev is
 *	invalid or any ring lacks a DMA address
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/* Every ring must have a valid DMA address before IPA can use it */
	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	if (dp_ipa_tx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
1574 
#ifdef IPA_SET_RESET_TX_DB_PA
/*
 * TX doorbell paddr is set/reset dynamically at pipe enable/disable time,
 * so there is nothing to do at doorbell-setup time.
 */
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
		dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif
1581 
/**
 * dp_ipa_set_doorbell_paddr() - Program TX and RX doorbell addresses
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Maps the doorbell registers and points the TX completion ring (unless
 * deferred via IPA_SET_RESET_TX_DB_PA) and the REO destination ring head
 * pointers at the IPA doorbells.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid pdev
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	dp_ipa_map_ring_doorbell_paddr(pdev);

	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring. It then updates the ring's Write/Head ptr and rings a doorbell
	 * to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
					  ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}
1615 
/**
 * dp_ipa_iounmap_doorbell_vaddr() - Release the ioremap'd TX doorbell mapping
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Undoes the ioremap() done in dp_ipa_map_ring_doorbell_paddr(); skipped
 * when the doorbell lives in DDR (phys_to_virt needs no unmap).
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid pdev
 */
QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!ipa_res->is_db_ddr_mapped)
		iounmap(ipa_res->tx_comp_doorbell_vaddr);

	return QDF_STATUS_SUCCESS;
}
1638 
/**
 * dp_ipa_op_response() - Dispatch an IPA uC operation response message
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 * @op_msg: operation message buffer; ownership passes to the registered
 *	callback, or is freed here when no callback is registered
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid pdev or
 *	missing callback
 */
QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t *op_msg)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (pdev->ipa_uc_op_cb) {
		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		    "%s: IPA callback function is not registered", __func__);
		/* No consumer: free the message here to avoid a leak */
		qdf_mem_free(op_msg);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
1665 
1666 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1667 				 ipa_uc_op_cb_type op_cb,
1668 				 void *usr_ctxt)
1669 {
1670 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1671 	struct dp_pdev *pdev =
1672 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1673 
1674 	if (!pdev) {
1675 		dp_err("Invalid instance");
1676 		return QDF_STATUS_E_FAILURE;
1677 	}
1678 
1679 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
1680 		return QDF_STATUS_SUCCESS;
1681 
1682 	pdev->ipa_uc_op_cb = op_cb;
1683 	pdev->usr_ctxt = usr_ctxt;
1684 
1685 	return QDF_STATUS_SUCCESS;
1686 }
1687 
1688 void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1689 {
1690 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1691 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1692 
1693 	if (!pdev) {
1694 		dp_err("Invalid instance");
1695 		return;
1696 	}
1697 
1698 	dp_debug("Deregister OP handler callback");
1699 	pdev->ipa_uc_op_cb = NULL;
1700 	pdev->usr_ctxt = NULL;
1701 }
1702 
/* Placeholder: IPA statistics retrieval is not implemented yet (TBD) */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
1708 
1709 /**
1710  * dp_tx_send_ipa_data_frame() - send IPA data frame
1711  * @soc_hdl: datapath soc handle
1712  * @vdev_id: id of the virtual device
1713  * @skb: skb to transmit
1714  *
1715  * Return: skb/ NULL is for success
1716  */
1717 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1718 				     qdf_nbuf_t skb)
1719 {
1720 	qdf_nbuf_t ret;
1721 
1722 	/* Terminate the (single-element) list of tx frames */
1723 	qdf_nbuf_set_next(skb, NULL);
1724 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
1725 	if (ret) {
1726 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1727 			  "%s: Failed to tx", __func__);
1728 		return ret;
1729 	}
1730 
1731 	return NULL;
1732 }
1733 
1734 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
1735 /**
1736  * dp_ipa_is_target_ready() - check if target is ready or not
1737  * @soc: datapath soc handle
1738  *
1739  * Return: true if target is ready
1740  */
1741 static inline
1742 bool dp_ipa_is_target_ready(struct dp_soc *soc)
1743 {
1744 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
1745 		return false;
1746 	else
1747 		return true;
1748 }
1749 #else
/* Without LL TX flow control the target is always treated as ready */
static inline
bool dp_ipa_is_target_ready(struct dp_soc *soc)
{
	return true;
}
1755 #endif
1756 
/**
 * dp_ipa_enable_autonomy() - Redirect RX traffic to the REO2IPA ring
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Reprograms the REO remap registers (IX0, and IX2/IX3 when RX hash is
 * enabled) so that RX destinations point at SW4/REO2IPA, letting IPA
 * consume RX autonomously.
 *
 * Return: QDF_STATUS_SUCCESS; QDF_STATUS_E_FAILURE on invalid pdev;
 *	QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint8_t ix_map[8];

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	if (!dp_ipa_is_target_ready(soc))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix_map[0] = REO_REMAP_SW1;
	ix_map[1] = REO_REMAP_SW4;
	ix_map[2] = REO_REMAP_SW1;
	ix_map[3] = REO_REMAP_SW4;
	ix_map[4] = REO_REMAP_SW4;
	ix_map[5] = REO_REMAP_RELEASE;
	ix_map[6] = REO_REMAP_FW;
	ix_map[7] = REO_REMAP_FW;

	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* With RX hash, point every hash bucket at SW4 as well */
		ix_map[0] = REO_REMAP_SW4;
		ix_map[1] = REO_REMAP_SW4;
		ix_map[2] = REO_REMAP_SW4;
		ix_map[3] = REO_REMAP_SW4;
		ix_map[4] = REO_REMAP_SW4;
		ix_map[5] = REO_REMAP_SW4;
		ix_map[6] = REO_REMAP_SW4;
		ix_map[7] = REO_REMAP_SW4;

		ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2,
					    ix_map);

		/* Same value is written to both IX2 and IX3 */
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
1817 
/**
 * dp_ipa_disable_autonomy() - Restore default REO remap configuration
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Reverts the REO remap registers to the host defaults (and the
 * RX-hash config when enabled), taking RX traffic back from IPA.
 *
 * Return: QDF_STATUS_SUCCESS; QDF_STATUS_E_FAILURE on invalid pdev;
 *	QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t ix0_map[8];
	uint32_t ix0;
	uint32_t ix1;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	if (!dp_ipa_is_target_ready(soc))
		return QDF_STATUS_E_AGAIN;

	/* Default host-side mapping (matches dp_ipa_ring_resource_setup) */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* Recompute the RX-hash remap values for IX2/IX3 */
		dp_reo_remap_config(soc, &ix1, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
1870 
1871 /* This should be configurable per H/W configuration enable status */
1872 #define L3_HEADER_PADDING	2
1873 
1874 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
1875 	defined(CONFIG_IPA_WDI_UNIFIED_API)
1876 
1877 #if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL)
1878 static inline void dp_setup_mcc_sys_pipes(
1879 		qdf_ipa_sys_connect_params_t *sys_in,
1880 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
1881 {
1882 	int i = 0;
1883 	/* Setup MCC sys pipe */
1884 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
1885 			DP_IPA_MAX_IFACE;
1886 	for (i = 0; i < DP_IPA_MAX_IFACE; i++)
1887 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
1888 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
1889 }
1890 #else
/*
 * dp_setup_mcc_sys_pipes() - No-op variant used when TX flow control is
 * compiled in (QCA_LL_TX_FLOW_CONTROL_V2 or QCA_IPA_LL_TX_FLOW_CONTROL);
 * requests zero MCC sys pipes from IPA.
 */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
1897 #endif
1898 
/*
 * dp_ipa_wdi_tx_params() - Fill the non-SMMU IPA WDI TX pipe setup info.
 * @soc: DP SoC handle
 * @ipa_res: pdev IPA resources (tx ring / tx completion ring mem info)
 * @tx: IPA WDI TX pipe setup info to populate
 * @over_gsi: true when the IPA connection is over GSI (selects WLAN2 vs
 *            WLAN1 consumer client)
 *
 * Transfer ring is the WBM TX completion ring (doorbell = WBM tail
 * pointer); event ring is the TCL ring (doorbell = TCL head pointer);
 * both doorbells are PCIe addresses. Also preprograms the TCL
 * descriptor template IPA uses for TX packets.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring: WBM TX completion ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring: TCL ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
#ifndef DP_BE_WAR
	/* TODO - KIWI does not have these fields */
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
#endif
	/* Descriptor body starts just past the TLV header */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM(soc->wbm_sw0_bm_id);
#ifndef DP_BE_WAR
	/* TODO - KIWI does not have these fields */
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
#endif
}
1961 
/*
 * dp_ipa_wdi_rx_params() - Fill the non-SMMU IPA WDI RX pipe setup info.
 * @soc: DP SoC handle
 * @ipa_res: pdev IPA resources (rx ready ring / rx refill ring mem info)
 * @rx: IPA WDI RX pipe setup info to populate
 * @over_gsi: true when the IPA connection is over GSI (selects WLAN2 vs
 *            WLAN1 producer client)
 *
 * Transfer ring is the REO (rx ready) ring with the REO tail pointer as
 * a PCIe doorbell; event ring is the FW rx refill ring whose head
 * pointer doorbell is not a PCIe address. Packet payload is offset past
 * the rx packet TLVs plus L3 alignment padding.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	/* Transfer ring: REO rx ready ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	/* Event ring: FW rx refill ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address (not a PCIe address) */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Skip rx packet TLVs plus L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2001 
/*
 * dp_ipa_wdi_tx_smmu_params() - Fill the SMMU-enabled IPA WDI TX pipe
 * setup info.
 * @soc: DP SoC handle
 * @ipa_res: pdev IPA resources (tx ring / tx completion ring sgtables)
 * @tx_smmu: IPA WDI SMMU TX pipe setup info to populate
 * @over_gsi: true when the IPA connection is over GSI
 * @hdl: DP IPA handle selecting the consumer client for over-GSI
 *
 * Same layout as dp_ipa_wdi_tx_params() (WBM completion ring as
 * transfer ring, TCL ring as event ring, preprogrammed TCL descriptor)
 * but ring bases are passed as sgtables for SMMU mapping.
 *
 * NOTE(review): when over_gsi is true and hdl is neither
 * DP_IPA_HDL_FIRST nor DP_IPA_HDL_SECOND, the SMMU client field is left
 * unset - confirm callers guarantee a valid hdl.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN2_CONS;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN4_CONS;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;
	}

	/* Transfer ring: WBM TX completion ring, as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	/* Event ring: TCL ring, as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
#ifndef DP_BE_WAR
	/* TODO - KIWI does not have these fields */
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
#endif
	/* Descriptor body starts just past the TLV header */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM(soc->wbm_sw0_bm_id);
#ifndef DP_BE_WAR
	/* TODO - KIWI does not have these fields */
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
#endif
}
2070 
/*
 * dp_ipa_wdi_rx_smmu_params() - Fill the SMMU-enabled IPA WDI RX pipe
 * setup info.
 * @soc: DP SoC handle
 * @ipa_res: pdev IPA resources (rx ready / rx refill ring sgtables)
 * @rx_smmu: IPA WDI SMMU RX pipe setup info to populate
 * @over_gsi: true when the IPA connection is over GSI
 * @hdl: DP IPA handle selecting the producer client for over-GSI
 *
 * Same layout as dp_ipa_wdi_rx_params() (REO ring as transfer ring,
 * FW refill ring as event ring) but ring bases are passed as sgtables
 * for SMMU mapping.
 *
 * NOTE(review): when over_gsi is true and hdl is neither
 * DP_IPA_HDL_FIRST nor DP_IPA_HDL_SECOND, the SMMU client field is left
 * unset - confirm callers guarantee a valid hdl.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;
	}

	/* Transfer ring: REO rx ready ring, as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	/* Event ring: FW rx refill ring, as an sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address (not a PCIe address) */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip rx packet TLVs plus L3 alignment padding */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2116 
2117 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2118 			void *ipa_i2w_cb, void *ipa_w2i_cb,
2119 			void *ipa_wdi_meter_notifier_cb,
2120 			uint32_t ipa_desc_size, void *ipa_priv,
2121 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
2122 			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
2123 			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
2124 			qdf_ipa_wdi_hdl_t hdl)
2125 {
2126 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2127 	struct dp_pdev *pdev =
2128 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2129 	struct dp_ipa_resources *ipa_res;
2130 	qdf_ipa_ep_cfg_t *tx_cfg;
2131 	qdf_ipa_ep_cfg_t *rx_cfg;
2132 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
2133 	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
2134 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
2135 	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
2136 	qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
2137 	qdf_ipa_wdi_conn_out_params_t pipe_out;
2138 	int ret;
2139 
2140 	if (!pdev) {
2141 		dp_err("Invalid instance");
2142 		return QDF_STATUS_E_FAILURE;
2143 	}
2144 
2145 	ipa_res = &pdev->ipa_resource;
2146 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2147 		return QDF_STATUS_SUCCESS;
2148 
2149 	pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
2150 	if (!pipe_in)
2151 		return QDF_STATUS_E_NOMEM;
2152 
2153 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
2154 
2155 	if (is_smmu_enabled)
2156 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
2157 	else
2158 		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;
2159 
2160 	dp_setup_mcc_sys_pipes(sys_in, pipe_in);
2161 
2162 	/* TX PIPE */
2163 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
2164 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
2165 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
2166 	} else {
2167 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
2168 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
2169 	}
2170 
2171 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
2172 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2173 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
2174 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
2175 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
2176 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
2177 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
2178 
2179 	/**
2180 	 * Transfer Ring: WBM Ring
2181 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
2182 	 * Event Ring: TCL ring
2183 	 * Event Ring Doorbell PA: TCL Head Pointer Address
2184 	 */
2185 	if (is_smmu_enabled)
2186 		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, hdl);
2187 	else
2188 		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
2189 
2190 	dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);
2191 
2192 	/* RX PIPE */
2193 	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
2194 		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
2195 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
2196 	} else {
2197 		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
2198 		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
2199 	}
2200 
2201 	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
2202 	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
2203 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
2204 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
2205 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
2206 	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
2207 	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
2208 	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
2209 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
2210 
2211 	/**
2212 	 * Transfer Ring: REO Ring
2213 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
2214 	 * Event Ring: FW ring
2215 	 * Event Ring Doorbell PA: FW Head Pointer Address
2216 	 */
2217 	if (is_smmu_enabled)
2218 		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, hdl);
2219 	else
2220 		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
2221 
2222 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
2223 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
2224 	QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl;
2225 
2226 	/* Connect WDI IPA PIPEs */
2227 	ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);
2228 
2229 	if (ret) {
2230 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2231 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
2232 			  __func__, ret);
2233 		qdf_mem_free(pipe_in);
2234 		return QDF_STATUS_E_FAILURE;
2235 	}
2236 
2237 	/* IPA uC Doorbell registers */
2238 	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
2239 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
2240 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
2241 
2242 	dp_ipa_set_pipe_db(ipa_res, &pipe_out);
2243 
2244 	ipa_res->is_db_ddr_mapped =
2245 		QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);
2246 
2247 	soc->ipa_first_tx_db_access = true;
2248 	qdf_mem_free(pipe_in);
2249 
2250 	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
2251 	soc->ipa_rx_buf_map_lock_initialized = true;
2252 
2253 	return QDF_STATUS_SUCCESS;
2254 }
2255 
2256 /**
2257  * dp_ipa_setup_iface() - Setup IPA header and register interface
2258  * @ifname: Interface name
2259  * @mac_addr: Interface MAC address
2260  * @prod_client: IPA prod client type
2261  * @cons_client: IPA cons client type
2262  * @session_id: Session ID
2263  * @is_ipv6_enabled: Is IPV6 enabled or not
2264  * @hdl: IPA handle
2265  *
2266  * Return: QDF_STATUS
2267  */
2268 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
2269 			      qdf_ipa_client_type_t prod_client,
2270 			      qdf_ipa_client_type_t cons_client,
2271 			      uint8_t session_id, bool is_ipv6_enabled,
2272 			      qdf_ipa_wdi_hdl_t hdl)
2273 {
2274 	qdf_ipa_wdi_reg_intf_in_params_t in;
2275 	qdf_ipa_wdi_hdr_info_t hdr_info;
2276 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
2277 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
2278 	int ret = -EINVAL;
2279 
2280 	qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
2281 
2282 	dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
2283 		 QDF_MAC_ADDR_REF(mac_addr));
2284 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2285 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
2286 
2287 	/* IPV4 header */
2288 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
2289 
2290 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
2291 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2292 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
2293 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2294 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2295 
2296 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
2297 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
2298 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2299 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
2300 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
2301 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
2302 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl;
2303 	dp_ipa_setup_iface_session_id(&in, session_id);
2304 
2305 	/* IPV6 header */
2306 	if (is_ipv6_enabled) {
2307 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
2308 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
2309 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
2310 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
2311 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
2312 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2313 	}
2314 
2315 	dp_debug("registering for session_id: %u", session_id);
2316 
2317 	ret = qdf_ipa_wdi_reg_intf(&in);
2318 
2319 	if (ret) {
2320 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2321 		    "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
2322 		    __func__, ret);
2323 		return QDF_STATUS_E_FAILURE;
2324 	}
2325 
2326 	return QDF_STATUS_SUCCESS;
2327 }
2328 
2329 #else /* !CONFIG_IPA_WDI_UNIFIED_API */
2330 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2331 			void *ipa_i2w_cb, void *ipa_w2i_cb,
2332 			void *ipa_wdi_meter_notifier_cb,
2333 			uint32_t ipa_desc_size, void *ipa_priv,
2334 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
2335 			uint32_t *rx_pipe_handle)
2336 {
2337 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2338 	struct dp_pdev *pdev =
2339 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2340 	struct dp_ipa_resources *ipa_res;
2341 	qdf_ipa_wdi_pipe_setup_info_t *tx;
2342 	qdf_ipa_wdi_pipe_setup_info_t *rx;
2343 	qdf_ipa_wdi_conn_in_params_t pipe_in;
2344 	qdf_ipa_wdi_conn_out_params_t pipe_out;
2345 	struct tcl_data_cmd *tcl_desc_ptr;
2346 	uint8_t *desc_addr;
2347 	uint32_t desc_size;
2348 	int ret;
2349 
2350 	if (!pdev) {
2351 		dp_err("Invalid instance");
2352 		return QDF_STATUS_E_FAILURE;
2353 	}
2354 
2355 	ipa_res = &pdev->ipa_resource;
2356 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2357 		return QDF_STATUS_SUCCESS;
2358 
2359 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2360 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2361 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
2362 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
2363 
2364 	/* TX PIPE */
2365 	/**
2366 	 * Transfer Ring: WBM Ring
2367 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
2368 	 * Event Ring: TCL ring
2369 	 * Event Ring Doorbell PA: TCL Head Pointer Address
2370 	 */
2371 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
2372 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
2373 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2374 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
2375 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
2376 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
2377 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
2378 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
2379 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
2380 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
2381 		ipa_res->tx_comp_ring_base_paddr;
2382 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
2383 		ipa_res->tx_comp_ring_size;
2384 	/* WBM Tail Pointer Address */
2385 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
2386 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
2387 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
2388 		ipa_res->tx_ring_base_paddr;
2389 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
2390 	/* TCL Head Pointer Address */
2391 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
2392 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
2393 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
2394 		ipa_res->tx_num_alloc_buffer;
2395 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
2396 
2397 	/* Preprogram TCL descriptor */
2398 	desc_addr =
2399 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
2400 	desc_size = sizeof(struct tcl_data_cmd);
2401 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
2402 	tcl_desc_ptr = (struct tcl_data_cmd *)
2403 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
2404 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
2405 						HAL_RX_BUF_RBM_SW2_BM;
2406 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
2407 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
2408 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
2409 
2410 	/* RX PIPE */
2411 	/**
2412 	 * Transfer Ring: REO Ring
2413 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
2414 	 * Event Ring: FW ring
2415 	 * Event Ring Doorbell PA: FW Head Pointer Address
2416 	 */
2417 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
2418 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
2419 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
2420 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
2421 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
2422 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
2423 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
2424 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
2425 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
2426 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
2427 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
2428 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
2429 						ipa_res->rx_rdy_ring_base_paddr;
2430 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
2431 						ipa_res->rx_rdy_ring_size;
2432 	/* REO Tail Pointer Address */
2433 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
2434 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
2435 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
2436 					ipa_res->rx_refill_ring_base_paddr;
2437 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
2438 						ipa_res->rx_refill_ring_size;
2439 	/* FW Head Pointer Address */
2440 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
2441 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
2442 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size +
2443 						L3_HEADER_PADDING;
2444 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
2445 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
2446 
2447 	/* Connect WDI IPA PIPE */
2448 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
2449 	if (ret) {
2450 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2451 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
2452 			  __func__, ret);
2453 		return QDF_STATUS_E_FAILURE;
2454 	}
2455 
2456 	/* IPA uC Doorbell registers */
2457 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2458 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
2459 		  __func__,
2460 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
2461 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
2462 
2463 	ipa_res->tx_comp_doorbell_paddr =
2464 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
2465 	ipa_res->tx_comp_doorbell_vaddr =
2466 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
2467 	ipa_res->rx_ready_doorbell_paddr =
2468 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
2469 
2470 	soc->ipa_first_tx_db_access = true;
2471 
2472 	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
2473 	soc->ipa_rx_buf_map_lock_initialized = true;
2474 
2475 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2476 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
2477 		  __func__,
2478 		  "transfer_ring_base_pa",
2479 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
2480 		  "transfer_ring_size",
2481 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
2482 		  "transfer_ring_doorbell_pa",
2483 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
2484 		  "event_ring_base_pa",
2485 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
2486 		  "event_ring_size",
2487 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
2488 		  "event_ring_doorbell_pa",
2489 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
2490 		  "num_pkt_buffers",
2491 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
2492 		  "tx_comp_doorbell_paddr",
2493 		  (void *)ipa_res->tx_comp_doorbell_paddr);
2494 
2495 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2496 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
2497 		  __func__,
2498 		  "transfer_ring_base_pa",
2499 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
2500 		  "transfer_ring_size",
2501 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
2502 		  "transfer_ring_doorbell_pa",
2503 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
2504 		  "event_ring_base_pa",
2505 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
2506 		  "event_ring_size",
2507 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
2508 		  "event_ring_doorbell_pa",
2509 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
2510 		  "num_pkt_buffers",
2511 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
2512 		  "tx_comp_doorbell_paddr",
2513 		  (void *)ipa_res->rx_ready_doorbell_paddr);
2514 
2515 	return QDF_STATUS_SUCCESS;
2516 }
2517 
2518 /**
2519  * dp_ipa_setup_iface() - Setup IPA header and register interface
2520  * @ifname: Interface name
2521  * @mac_addr: Interface MAC address
2522  * @prod_client: IPA prod client type
2523  * @cons_client: IPA cons client type
2524  * @session_id: Session ID
2525  * @is_ipv6_enabled: Is IPV6 enabled or not
2526  * @hdl: IPA handle
2527  *
2528  * Return: QDF_STATUS
2529  */
2530 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
2531 			      qdf_ipa_client_type_t prod_client,
2532 			      qdf_ipa_client_type_t cons_client,
2533 			      uint8_t session_id, bool is_ipv6_enabled,
2534 			      qdf_ipa_wdi_hdl_t hdl)
2535 {
2536 	qdf_ipa_wdi_reg_intf_in_params_t in;
2537 	qdf_ipa_wdi_hdr_info_t hdr_info;
2538 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
2539 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
2540 	int ret = -EINVAL;
2541 
2542 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2543 		  "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
2544 		  __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
2545 
2546 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2547 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
2548 
2549 	/* IPV4 header */
2550 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
2551 
2552 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
2553 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2554 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
2555 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2556 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2557 
2558 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
2559 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
2560 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2561 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
2562 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
2563 		htonl(session_id << 16);
2564 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
2565 
2566 	/* IPV6 header */
2567 	if (is_ipv6_enabled) {
2568 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
2569 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
2570 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
2571 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
2572 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
2573 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2574 	}
2575 
2576 	ret = qdf_ipa_wdi_reg_intf(&in);
2577 	if (ret) {
2578 		dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
2579 		       ret);
2580 		return QDF_STATUS_E_FAILURE;
2581 	}
2582 
2583 	return QDF_STATUS_SUCCESS;
2584 }
2585 
2586 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
2587 
2588 /**
2589  * dp_ipa_cleanup() - Disconnect IPA pipes
2590  * @soc_hdl: dp soc handle
2591  * @pdev_id: dp pdev id
2592  * @tx_pipe_handle: Tx pipe handle
2593  * @rx_pipe_handle: Rx pipe handle
2594  * @hdl: IPA handle
2595  *
2596  * Return: QDF_STATUS
2597  */
2598 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2599 			  uint32_t tx_pipe_handle, uint32_t rx_pipe_handle,
2600 			  qdf_ipa_wdi_hdl_t hdl)
2601 {
2602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2603 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2604 	struct dp_pdev *pdev;
2605 	int ret;
2606 
2607 	ret = qdf_ipa_wdi_disconn_pipes(hdl);
2608 	if (ret) {
2609 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
2610 		       ret);
2611 		status = QDF_STATUS_E_FAILURE;
2612 	}
2613 
2614 	if (soc->ipa_rx_buf_map_lock_initialized) {
2615 		qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
2616 		soc->ipa_rx_buf_map_lock_initialized = false;
2617 	}
2618 
2619 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2620 	if (qdf_unlikely(!pdev)) {
2621 		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
2622 		status = QDF_STATUS_E_FAILURE;
2623 		goto exit;
2624 	}
2625 
2626 	dp_ipa_unmap_ring_doorbell_paddr(pdev);
2627 exit:
2628 	return status;
2629 }
2630 
2631 /**
2632  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
2633  * @ifname: Interface name
2634  * @is_ipv6_enabled: Is IPV6 enabled or not
2635  * @hdl: IPA handle
2636  *
2637  * Return: QDF_STATUS
2638  */
2639 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
2640 				qdf_ipa_wdi_hdl_t hdl)
2641 {
2642 	int ret;
2643 
2644 	ret = qdf_ipa_wdi_dereg_intf(ifname, hdl);
2645 	if (ret) {
2646 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2647 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
2648 			  __func__, ret);
2649 		return QDF_STATUS_E_FAILURE;
2650 	}
2651 
2652 	return QDF_STATUS_SUCCESS;
2653 }
2654 
2655 #ifdef IPA_SET_RESET_TX_DB_PA
2656 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
2657 				dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
2658 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
2659 				dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
2660 #else
2661 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
2662 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
2663 #endif
2664 
/*
 * dp_ipa_enable_pipes() - Enable the IPA WDI TX/RX pipes for a pdev.
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the dp pdev handle
 * @hdl: IPA handle
 *
 * Marks the pipes enabled, programs the TX doorbell address and SMMU-maps
 * the Rx buffer pool *before* asking IPA to enable the pipes, then rolls
 * all of that back if qdf_ipa_wdi_enable_pipes() fails.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       qdf_ipa_wdi_hdl_t hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	QDF_STATUS result;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;

	/* Datapath state must be ready before IPA starts pushing traffic */
	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
	DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);

	result = qdf_ipa_wdi_enable_pipes(hdl);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		/* Roll back the state set above */
		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
		DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
		return QDF_STATUS_E_FAILURE;
	}

	/* Initialize TX comp ring HP on the first doorbell access
	 * after a (re)setup of the pipes.
	 */
	if (soc->ipa_first_tx_db_access) {
		dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
		soc->ipa_first_tx_db_access = false;
	}

	return QDF_STATUS_SUCCESS;
}
2703 
/*
 * dp_ipa_disable_pipes() - Disable the IPA WDI TX/RX pipes for a pdev.
 * @soc_hdl: data path soc handle
 * @pdev_id: id of the dp pdev handle
 * @hdl: IPA handle
 *
 * Waits for pending TX completions to drain, resets the TX doorbell
 * address, then disables the pipes in IPA and tears down datapath state.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				qdf_ipa_wdi_hdl_t hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	QDF_STATUS result;
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;

	/* Give in-flight TX completions time to drain */
	qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
	/*
	 * Reset the tx completion doorbell address before invoking IPA disable
	 * pipes API to ensure that there is no access to IPA tx doorbell
	 * address post disable pipes.
	 */
	DP_IPA_RESET_TX_DB_PA(soc, ipa_res);

	result = qdf_ipa_wdi_disable_pipes(hdl);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Disable WDI PIPE fail, code %d",
			  __func__, result);
		qdf_assert_always(0);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);

	/* result is 0 here (the failure case returned above), so this
	 * always evaluates to QDF_STATUS_SUCCESS.
	 */
	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}
2742 
2743 /**
2744  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
2745  * @client: Client type
2746  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
2747  * @hdl: IPA handle
2748  *
2749  * Return: QDF_STATUS
2750  */
2751 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
2752 				 qdf_ipa_wdi_hdl_t hdl)
2753 {
2754 	qdf_ipa_wdi_perf_profile_t profile;
2755 	QDF_STATUS result;
2756 
2757 	profile.client = client;
2758 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
2759 
2760 	result = qdf_ipa_wdi_set_perf_profile(hdl, &profile);
2761 	if (result) {
2762 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2763 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
2764 			  __func__, result);
2765 		return QDF_STATUS_E_FAILURE;
2766 	}
2767 
2768 	return QDF_STATUS_SUCCESS;
2769 }
2770 
2771 /**
2772  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
2773  * @pdev: pdev
2774  * @vdev: vdev
2775  * @nbuf: skb
2776  *
2777  * Return: nbuf if TX fails and NULL if TX succeeds
2778  */
2779 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
2780 				       struct dp_vdev *vdev,
2781 				       qdf_nbuf_t nbuf)
2782 {
2783 	struct dp_peer *vdev_peer;
2784 	uint16_t len;
2785 
2786 	vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
2787 	if (qdf_unlikely(!vdev_peer))
2788 		return nbuf;
2789 
2790 	if (qdf_unlikely(!vdev_peer->txrx_peer)) {
2791 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
2792 		return nbuf;
2793 	}
2794 
2795 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2796 	len = qdf_nbuf_len(nbuf);
2797 
2798 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
2799 		DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
2800 					      rx.intra_bss.fail, 1, len);
2801 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
2802 		return nbuf;
2803 	}
2804 
2805 	DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
2806 				      rx.intra_bss.pkts, 1, len);
2807 	dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
2808 	return NULL;
2809 }
2810 
/**
 * dp_ipa_rx_intrabss_fwd() - Attempt intra-BSS forwarding of an IPA RX frame
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the frame arrived on
 * @nbuf: RX frame from IPA (cb[] layout filled in by the IPA driver)
 * @fwd_success: out param, set true if the frame (or its copy) was TXed
 *
 * Return: true if nbuf was consumed by this function (forwarded or freed),
 *	   false if the caller should pass nbuf up to the network stack.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* Broadcast/multicast: TX a copy within the BSS and still
		 * deliver the original to the stack (return false).
		 */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frame addressed to our own vdev MAC: not a forward candidate */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Only forward when both DA and SA are known peers on this vdev;
	 * the refs are taken solely as existence checks and dropped at once.
	 */
	da_peer = dp_peer_find_hash_find(soc, eh->h_dest, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!da_peer)
		goto out;

	dp_peer_unref_delete(da_peer, DP_MOD_ID_IPA);

	sa_peer = dp_peer_find_hash_find(soc, eh->h_source, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!sa_peer)
		goto out;

	dp_peer_unref_delete(sa_peer, DP_MOD_ID_IPA);

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	/* nbuf consumed either way (forwarded or freed above) */
	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
2895 
#ifdef MDM_PLATFORM
/* Compile-time platform query: true only on MDM builds */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
#else
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
#endif
2907 
2908 /**
2909  * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
2910  * @soc: soc
2911  * @nbuf: source skb
2912  *
2913  * Return: new nbuf if success and otherwise NULL
2914  */
static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint8_t *src_nbuf_data;
	uint8_t *dst_nbuf_data;
	qdf_nbuf_t dst_nbuf;
	qdf_nbuf_t temp_nbuf = nbuf;
	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
	bool is_nbuf_head = true;
	uint32_t copy_len = 0;

	/* Allocate a single contiguous RX-sized buffer for the copy */
	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
				  RX_BUFFER_RESERVATION,
				  RX_DATA_BUFFER_ALIGNMENT, FALSE);

	if (!dst_nbuf) {
		dp_err_rl("nbuf allocate fail");
		return NULL;
	}

	/* Total payload plus L3 padding must fit one RX buffer */
	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_free(dst_nbuf);
		dp_err_rl("nbuf is jumbo data");
		return NULL;
	}

	/* prepare to copy all data into new skb */
	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
	while (temp_nbuf) {
		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
		/* first head nbuf: copy the RX packet TLVs separately, then
		 * insert L3_HEADER_PADDING between TLVs and payload so the
		 * IP header lands 4-byte aligned in the linear buffer.
		 */
		if (is_nbuf_head) {
			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
				     soc->rx_pkt_tlv_size);
			/* leave extra 2 bytes L3_HEADER_PADDING */
			dst_nbuf_data += (soc->rx_pkt_tlv_size +
					  L3_HEADER_PADDING);
			src_nbuf_data += soc->rx_pkt_tlv_size;
			/* head payload excludes the TLV area just copied */
			copy_len = qdf_nbuf_headlen(temp_nbuf) -
						soc->rx_pkt_tlv_size;
			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
			is_nbuf_head = false;
		} else {
			/* chained fragments: copy their full length */
			copy_len = qdf_nbuf_len(temp_nbuf);
			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
		}
		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
		dst_nbuf_data += copy_len;
	}

	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
	/* copy is done, free original nbuf */
	qdf_nbuf_free(nbuf);

	return dst_nbuf;
}
2971 
2972 /**
2973  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
2974  * @soc: soc
2975  * @nbuf: skb
2976  *
2977  * Return: nbuf if success and otherwise NULL
2978  */
2979 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
2980 {
2981 
2982 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2983 		return nbuf;
2984 
2985 	/* WLAN IPA is run-time disabled */
2986 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
2987 		return nbuf;
2988 
2989 	if (!qdf_nbuf_is_frag(nbuf))
2990 		return nbuf;
2991 
2992 	/* linearize skb for IPA */
2993 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
2994 }
2995 
2996 QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
2997 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2998 {
2999 	QDF_STATUS ret;
3000 
3001 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3002 	struct dp_pdev *pdev =
3003 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3004 
3005 	if (!pdev) {
3006 		dp_err("%s invalid instance", __func__);
3007 		return QDF_STATUS_E_FAILURE;
3008 	}
3009 
3010 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
3011 		dp_debug("SMMU S1 disabled");
3012 		return QDF_STATUS_SUCCESS;
3013 	}
3014 	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
3015 	if (ret)
3016 		return ret;
3017 
3018 	ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true);
3019 	if (ret)
3020 		__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);
3021 
3022 	return ret;
3023 }
3024 
3025 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
3026 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
3027 {
3028 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3029 	struct dp_pdev *pdev =
3030 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3031 
3032 	if (!pdev) {
3033 		dp_err("%s invalid instance", __func__);
3034 		return QDF_STATUS_E_FAILURE;
3035 	}
3036 
3037 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
3038 		dp_debug("SMMU S1 disabled");
3039 		return QDF_STATUS_SUCCESS;
3040 	}
3041 
3042 	if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false) ||
3043 	    dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false))
3044 		return QDF_STATUS_E_FAILURE;
3045 
3046 	return QDF_STATUS_SUCCESS;
3047 }
3048 
3049 #endif
3050