xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c (revision 81f3009593909786596f6f268445cda849d0395a)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #ifdef IPA_OFFLOAD
18 
19 #include <qdf_ipa_wdi3.h>
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include <hal_api.h>
24 #include <hal_reo.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_types.h"
30 #include "dp_htt.h"
31 #include "dp_tx.h"
32 #include "dp_rx.h"
33 #include "dp_ipa.h"
34 #include "dp_internal.h"
35 #ifdef WIFI_MONITOR_SUPPORT
36 #include "dp_mon.h"
37 #endif
38 
/* Ring index for WBM2SW2 release ring */
#define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX

/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)

/* WAR for IPA_OFFLOAD case. In some cases, it's observed that WBM tries to
 * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
 * This causes back pressure, resulting in a FW crash.
 * By leaving some entries with no buffer attached, WBM will be able to write
 * to the ring, and from dumps we can figure out the buffer which is causing
 * this issue.
 */
#define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: qdf log timestamp at which the record was written
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};
65 
/* Depth of the REO remap history ring buffer */
#define REO_REMAP_HISTORY_SIZE 32

/* Circular log of the most recent REO remap register programmings */
struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];

/* Monotonically increasing write cursor into dp_ipa_reo_remap_history */
static qdf_atomic_t dp_ipa_reo_remap_history_index;

/**
 * dp_ipa_reo_remap_record_index_next() - get the next history slot to fill
 * @index: shared atomic write cursor
 *
 * Post-increments the cursor and folds it back by REO_REMAP_HISTORY_SIZE
 * once it reaches the end so the raw counter stays bounded.  The check and
 * the subtraction are not one atomic step, but any transient overshoot is
 * still mapped into range by the final modulo.
 *
 * Return: slot index in [0, REO_REMAP_HISTORY_SIZE)
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}
80 
81 /**
82  * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
83  * @ix0_val: reo destination ring IX0 value
84  * @ix2_val: reo destination ring IX2 value
85  * @ix3_val: reo destination ring IX3 value
86  *
87  * Return: None
88  */
89 static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
90 					 uint32_t ix3_val)
91 {
92 	int idx = dp_ipa_reo_remap_record_index_next(
93 				&dp_ipa_reo_remap_history_index);
94 	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];
95 
96 	record->timestamp = qdf_get_log_timestamp();
97 	record->ix0_reg = ix0_val;
98 	record->ix2_reg = ix2_val;
99 	record->ix3_reg = ix3_val;
100 }
101 
/**
 * __dp_ipa_handle_buf_smmu_mapping() - create/release the IPA SMMU mapping
 *	for a single nbuf data buffer
 * @soc: data path SoC handle
 * @nbuf: network buffer whose frag-0 DMA address is (un)mapped
 * @size: length of the mapping in bytes
 * @create: true to create the IPA SMMU mapping, false to release it
 *
 * Builds a one-entry mem-map table from the buffer's DMA address and hands
 * it to the IPA driver.  Failures are treated as fatal (qdf_assert_always)
 * since an inconsistent SMMU state would corrupt later DMA.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);

		ret = qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
	} else {
		ret = qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
135 
/**
 * dp_ipa_handle_rx_buf_smmu_mapping() - handle IPA SMMU map/unmap state of
 *	one Rx nbuf
 * @soc: data path SoC handle
 * @nbuf: Rx network buffer
 * @size: mapping length in bytes
 * @create: true to create the IPA SMMU mapping, false to release it
 *
 * The operation is skipped when any pdev has monitor mode configured, when
 * IPA offload is disabled in cfg, or when SMMU S1 is not enabled.
 * Duplicate map/unmap requests are detected via the per-nbuf
 * rx_ipa_smmu_map flag, counted in stats and rejected.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create)
{
	struct dp_pdev *pdev;
	int i;

	/* No IPA SMMU handling while monitor mode is configured */
	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && dp_monitor_is_configured(pdev))
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if ipa pipes is disabled, but if it's unmap
	 * operation and nbuf has done ipa smmu map before,
	 * do ipa smmu unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	/* Duplicate map (or unmap) request for this nbuf: count and reject */
	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
}
180 
181 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
182 	struct dp_soc *soc,
183 	struct dp_pdev *pdev,
184 	bool create)
185 {
186 	uint32_t index;
187 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
188 	uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
189 	qdf_nbuf_t nbuf;
190 	uint32_t buf_len;
191 
192 	if (!ipa_is_ready()) {
193 		dp_info("IPA is not READY");
194 		return 0;
195 	}
196 
197 	for (index = 0; index < tx_buffer_cnt; index++) {
198 		nbuf = (qdf_nbuf_t)
199 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
200 		if (!nbuf)
201 			continue;
202 		buf_len = qdf_nbuf_get_data_len(nbuf);
203 		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
204 						       create);
205 	}
206 
207 	return ret;
208 }
209 
#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/**
 * dp_ipa_set_reo_ctx_mapping_lock_required() - publish whether REO ring
 *	contexts must honor the IPA mapping lock
 * @soc: data path SoC handle
 * @lock_required: value to set for every REO destination ring
 *
 * Each per-ring flag is written under that ring's SRNG lock so that
 * concurrent ring accessors observe a consistent value.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
	hal_ring_handle_t hal_ring_hdl;
	int ring;

	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
		hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng;
		hal_srng_lock(hal_ring_hdl);
		soc->ipa_reo_ctx_lock_required[ring] = lock_required;
		hal_srng_unlock(hal_ring_hdl);
	}
}
#else
/* No-op: SRNG access is lock-less in this configuration */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
}

#endif
231 
232 #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap every in-use
 *	Rx descriptor buffer (multi-page Rx descriptor pool variant)
 * @soc: data path SoC handle
 * @pdev: physical device handle
 * @create: true to create the mappings, false to release them
 *
 * Walks all descriptors of the pdev's Rx buffer pool under the pool lock
 * and the IPA Rx mapping lock, skipping free/unmapped descriptors and
 * duplicate requests, and creates/releases the IPA SMMU mapping for each
 * attached nbuf.
 *
 * Return: QDF_STATUS of the last attempted mapping operation
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Stop if the descriptor pages have been torn down */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		/* Only live, still-mapped descriptors carry an nbuf */
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* Duplicate map/unmap request: count in stats and skip */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		ret = __dp_ipa_handle_buf_smmu_mapping(
				soc, nbuf, rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return ret;
}
293 #else
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap every in-use
 *	Rx descriptor buffer (flat-array Rx descriptor pool variant)
 * @soc: data path SoC handle
 * @pdev: physical device handle
 * @create: true to create the mappings, false to release them
 *
 * Same walk as the multi-page variant, but over rx_pool->array.
 * NOTE(review): the per-buffer status from
 * __dp_ipa_handle_buf_smmu_mapping() is not propagated here (the multi-page
 * variant returns it) — confirm whether that is intentional.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	qdf_nbuf_t nbuf;
	int i;

	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	for (i = 0; i < rx_pool->pool_size; i++) {
		/* Only live, still-mapped descriptors carry an nbuf */
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
			continue;

		nbuf = rx_pool->array[i].rx_desc.nbuf;

		/* Duplicate map/unmap request: count in stats and skip */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return QDF_STATUS_SUCCESS;
}
344 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
345 
346 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
347 					     qdf_shared_mem_t *shared_mem,
348 					     void *cpu_addr,
349 					     qdf_dma_addr_t dma_addr,
350 					     uint32_t size)
351 {
352 	qdf_dma_addr_t paddr;
353 	int ret;
354 
355 	shared_mem->vaddr = cpu_addr;
356 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
357 	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
358 
359 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
360 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
361 
362 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
363 				      shared_mem->vaddr, dma_addr, size);
364 	if (ret) {
365 		dp_err("Unable to get DMA sgtable");
366 		return QDF_STATUS_E_NOMEM;
367 	}
368 
369 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
370 
371 	return QDF_STATUS_SUCCESS;
372 }
373 
374 #ifdef IPA_WDI3_TX_TWO_PIPES
375 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
376 {
377 	struct dp_ipa_resources *ipa_res;
378 	qdf_nbuf_t nbuf;
379 	int idx;
380 
381 	for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
382 		nbuf = (qdf_nbuf_t)
383 			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
384 		if (!nbuf)
385 			continue;
386 
387 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
388 		qdf_mem_dp_tx_skb_cnt_dec();
389 		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
390 		qdf_nbuf_free(nbuf);
391 		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
392 						(void *)NULL;
393 	}
394 
395 	qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
396 	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
397 
398 	ipa_res = &pdev->ipa_resource;
399 	if (!ipa_res->is_db_ddr_mapped)
400 		iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);
401 
402 	qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
403 	qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
404 }
405 
/**
 * dp_ipa_tx_alt_pool_attach() - allocate the alternate-pipe Tx buffer pool
 *	and post the buffers into the alt WBM completion ring
 * @soc: data path SoC handle
 *
 * Allocates up to (num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES)
 * nbufs, maps each for bidirectional DMA and writes its address into the
 * WBM ring, simulating H/W as the source ring and updating HP.  Allocation
 * stops early on nbuf or ring-entry exhaustion; a partially filled pool is
 * kept.
 *
 * Return: QDF_STATUS_SUCCESS (possibly with a partial pool),
 *	-EINVAL when the ring is too small, -ENOMEM when no buffer could
 *	be allocated at all
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t wbm_sw0_bm_id = soc->wbm_sw0_bm_id;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave some entries unpopulated as a WBM back-pressure WAR */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pool table sized to the full ring so indexes never overflow */
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/* Hand the buffer to H/W tagged with the SW4 buffer manager */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0,
					     HAL_WBM_SW4_BM_ID(wbm_sw0_bm_id));

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
510 
511 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
512 {
513 	struct dp_soc *soc = pdev->soc;
514 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
515 
516 	ipa_res->tx_alt_ring_num_alloc_buffer =
517 		(uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;
518 
519 	dp_ipa_get_shared_mem_info(
520 			soc->osdev, &ipa_res->tx_alt_ring,
521 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
522 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
523 			soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
524 
525 	dp_ipa_get_shared_mem_info(
526 			soc->osdev, &ipa_res->tx_alt_comp_ring,
527 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
528 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
529 			soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
530 
531 	if (!qdf_mem_get_dma_addr(soc->osdev,
532 				  &ipa_res->tx_alt_comp_ring.mem_info))
533 		return QDF_STATUS_E_FAILURE;
534 
535 	return QDF_STATUS_SUCCESS;
536 }
537 
/**
 * dp_ipa_tx_alt_ring_resource_setup() - capture alternate Tx ring bases,
 *	sizes and HP/TP register physical addresses for IPA
 * @soc: data path SoC handle
 *
 * Records the SW2TCL2 data ring and WBM2SW4 completion ring parameters
 * into soc->ipa_uc_tx_rsc_alt, translating the HP/TP register virtual
 * addresses into physical addresses via the device base PA.
 */
static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
	hal_srng = (struct hal_srng *)
		soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* ring size in bytes; entry_size appears to be in 4-byte units —
	 * NOTE(review): confirm against hal_srng_params definition
	 */
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	/* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
	hal_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
}
609 
/**
 * dp_ipa_map_ring_doorbell_paddr() - map the IPA doorbell registers for CPU
 *	access and, under SMMU S1, remap them to IOVAs for the device
 * @pdev: physical device handle
 *
 * When SMMU S1 is enabled the *_doorbell_paddr fields are overwritten with
 * the IOVA returned by pld_smmu_map(); map failures are fatal
 * (qdf_assert_always).  The alternative-pipe doorbell is handled only if
 * the IPA driver provided one.
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	/* DDR-backed doorbells are directly addressable; MMIO must be
	 * ioremap()ed
	 */
	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* From here on the paddr field holds the device IOVA */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
661 
662 static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
663 {
664 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
665 	struct dp_soc *soc = pdev->soc;
666 	int ret = 0;
667 
668 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
669 		return;
670 
671 	/* Unmap must be in reverse order of map */
672 	if (ipa_res->tx_alt_comp_doorbell_paddr) {
673 		ret = pld_smmu_unmap(soc->osdev->dev,
674 				     ipa_res->tx_alt_comp_doorbell_paddr,
675 				     sizeof(uint32_t));
676 		qdf_assert_always(!ret);
677 	}
678 
679 	ret = pld_smmu_unmap(soc->osdev->dev,
680 			     ipa_res->rx_ready_doorbell_paddr,
681 			     sizeof(uint32_t));
682 	qdf_assert_always(!ret);
683 
684 	ret = pld_smmu_unmap(soc->osdev->dev,
685 			     ipa_res->tx_comp_doorbell_paddr,
686 			     sizeof(uint32_t));
687 	qdf_assert_always(!ret);
688 }
689 
690 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
691 						 struct dp_pdev *pdev,
692 						 bool create)
693 {
694 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
695 	struct ipa_dp_tx_rsc *rsc;
696 	uint32_t tx_buffer_cnt;
697 	uint32_t buf_len;
698 	qdf_nbuf_t nbuf;
699 	uint32_t index;
700 
701 	if (!ipa_is_ready()) {
702 		dp_info("IPA is not READY");
703 		return QDF_STATUS_SUCCESS;
704 	}
705 
706 	rsc = &soc->ipa_uc_tx_rsc_alt;
707 	tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
708 
709 	for (index = 0; index < tx_buffer_cnt; index++) {
710 		nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
711 		if (!nbuf)
712 			continue;
713 
714 		buf_len = qdf_nbuf_get_data_len(nbuf);
715 		ret = __dp_ipa_handle_buf_smmu_mapping(
716 				soc, nbuf, buf_len, create);
717 	}
718 
719 	return ret;
720 }
721 
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - fill non-SMMU WDI pipe setup info for
 *	the alternate (second) Tx pipe
 * @soc: data path SoC handle
 * @ipa_res: pdev IPA resources holding the alt ring mem info
 * @tx: WDI pipe setup info to populate
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_alt_ring_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
				HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1;	/* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0;	/* padding for alignment */
}
775 
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - fill SMMU WDI pipe setup info for
 *	the alternate (second) Tx pipe
 * @soc: data path SoC handle
 * @ipa_res: pdev IPA resources holding the alt ring sgtables/mem info
 * @tx_smmu: WDI SMMU pipe setup info to populate
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
					HAL_WBM_SW4_BM_ID(soc->wbm_sw0_bm_id);
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->addry_en = 1;	/* Address Y search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 0;	/* padding for alignment */
}
827 
828 static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
829 				     struct dp_ipa_resources *res,
830 				     qdf_ipa_wdi_conn_in_params_t *in)
831 {
832 	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
833 	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
834 	qdf_ipa_ep_cfg_t *tx_cfg;
835 
836 	QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;
837 
838 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
839 		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
840 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
841 		dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
842 	} else {
843 		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
844 		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx);
845 		dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
846 	}
847 
848 	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
849 	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
850 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
851 	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
852 	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
853 	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
854 	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
855 }
856 
857 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
858 			       qdf_ipa_wdi_conn_out_params_t *out)
859 {
860 	res->tx_comp_doorbell_paddr =
861 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
862 	res->rx_ready_doorbell_paddr =
863 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
864 	res->tx_alt_comp_doorbell_paddr =
865 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
866 }
867 
/**
 * dp_ipa_setup_iface_session_id() - derive interface meta data from the
 *	encoded session id
 * @in: WDI interface registration input params
 * @session_id: encoded id; the low bit(s) carry the "is 2G interface" flag
 *	and the remaining bits the actual session id
 *	(NOTE(review): encoding assumes IPA_SESSION_ID_SHIFT doubles as the
 *	flag mask — confirm against the macro definition)
 *
 * Stores the decoded session id in bits 16+ of the meta data in network
 * byte order, and selects the TX1 pipe for 2G interfaces.
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;

	session_id = session_id >> IPA_SESSION_ID_SHIFT;
	dp_debug("session_id %u is_2g_iface %d", session_id, is_2g_iface);

	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
}
879 
880 static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
881 					struct dp_ipa_resources *res)
882 {
883 	struct hal_srng *wbm_srng;
884 
885 	/* Init first TX comp ring */
886 	wbm_srng = (struct hal_srng *)
887 		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
888 
889 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
890 			     res->tx_comp_doorbell_vaddr);
891 
892 	/* Init the alternate TX comp ring */
893 	wbm_srng = (struct hal_srng *)
894 		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
895 
896 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
897 			     res->tx_alt_comp_doorbell_vaddr);
898 }
899 
900 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
901 					 struct dp_ipa_resources *ipa_res)
902 {
903 	struct hal_srng *wbm_srng;
904 
905 	wbm_srng = (struct hal_srng *)
906 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
907 
908 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
909 					  ipa_res->tx_comp_doorbell_paddr);
910 
911 	dp_info("paddr %pK vaddr %pK",
912 		(void *)ipa_res->tx_comp_doorbell_paddr,
913 		(void *)ipa_res->tx_comp_doorbell_vaddr);
914 
915 	/* Setup for alternative TX comp ring */
916 	wbm_srng = (struct hal_srng *)
917 			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
918 
919 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
920 					  ipa_res->tx_alt_comp_doorbell_paddr);
921 
922 	dp_info("paddr %pK vaddr %pK",
923 		(void *)ipa_res->tx_alt_comp_doorbell_paddr,
924 		(void *)ipa_res->tx_alt_comp_doorbell_vaddr);
925 }
926 
927 #ifdef IPA_SET_RESET_TX_DB_PA
928 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
929 					      struct dp_ipa_resources *ipa_res)
930 {
931 	hal_ring_handle_t wbm_srng;
932 	qdf_dma_addr_t hp_addr;
933 
934 	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
935 	if (!wbm_srng)
936 		return QDF_STATUS_E_FAILURE;
937 
938 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
939 
940 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
941 
942 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
943 
944 	/* Reset alternative TX comp ring */
945 	wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
946 	if (!wbm_srng)
947 		return QDF_STATUS_E_FAILURE;
948 
949 	hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;
950 
951 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
952 
953 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
954 
955 	return QDF_STATUS_SUCCESS;
956 }
957 #endif /* IPA_SET_RESET_TX_DB_PA */
958 
959 #else /* !IPA_WDI3_TX_TWO_PIPES */
960 
/* Single TX pipe build (IPA_WDI3_TX_TWO_PIPES off): there is no alternate
 * TX buffer pool, so detach is a no-op.
 */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
965 
/* Single TX pipe build: no alternate TCL/WBM ring resources to record */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
969 
/* Single TX pipe build: nothing to allocate, report success (0) */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	return 0;
}
974 
/* Single TX pipe build: no alternate ring resources to export to IPA */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
979 
980 static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
981 {
982 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
983 	uint32_t rx_ready_doorbell_dmaaddr;
984 	uint32_t tx_comp_doorbell_dmaaddr;
985 	struct dp_soc *soc = pdev->soc;
986 	int ret = 0;
987 
988 	if (ipa_res->is_db_ddr_mapped)
989 		ipa_res->tx_comp_doorbell_vaddr =
990 				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
991 	else
992 		ipa_res->tx_comp_doorbell_vaddr =
993 				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);
994 
995 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
996 		ret = pld_smmu_map(soc->osdev->dev,
997 				   ipa_res->tx_comp_doorbell_paddr,
998 				   &tx_comp_doorbell_dmaaddr,
999 				   sizeof(uint32_t));
1000 		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
1001 		qdf_assert_always(!ret);
1002 
1003 		ret = pld_smmu_map(soc->osdev->dev,
1004 				   ipa_res->rx_ready_doorbell_paddr,
1005 				   &rx_ready_doorbell_dmaaddr,
1006 				   sizeof(uint32_t));
1007 		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
1008 		qdf_assert_always(!ret);
1009 	}
1010 }
1011 
1012 static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
1013 {
1014 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1015 	struct dp_soc *soc = pdev->soc;
1016 	int ret = 0;
1017 
1018 	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
1019 		return;
1020 
1021 	ret = pld_smmu_unmap(soc->osdev->dev,
1022 			     ipa_res->rx_ready_doorbell_paddr,
1023 			     sizeof(uint32_t));
1024 	qdf_assert_always(!ret);
1025 
1026 	ret = pld_smmu_unmap(soc->osdev->dev,
1027 			     ipa_res->tx_comp_doorbell_paddr,
1028 			     sizeof(uint32_t));
1029 	qdf_assert_always(!ret);
1030 }
1031 
/* Single TX pipe build: no alternate pool buffers to (un)map in SMMU */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create)
{
	return QDF_STATUS_SUCCESS;
}
1038 
/* Single TX pipe build: no second TX pipe to describe in the conn params */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}
1044 
1045 static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
1046 			       qdf_ipa_wdi_conn_out_params_t *out)
1047 {
1048 	res->tx_comp_doorbell_paddr =
1049 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
1050 	res->rx_ready_doorbell_paddr =
1051 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
1052 }
1053 
1054 static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
1055 					  uint8_t session_id)
1056 {
1057 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
1058 }
1059 
1060 static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
1061 					       struct dp_ipa_resources *res)
1062 {
1063 	struct hal_srng *wbm_srng = (struct hal_srng *)
1064 		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1065 
1066 	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
1067 			     res->tx_comp_doorbell_vaddr);
1068 }
1069 
1070 static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
1071 					 struct dp_ipa_resources *ipa_res)
1072 {
1073 	struct hal_srng *wbm_srng = (struct hal_srng *)
1074 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1075 
1076 	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
1077 					  ipa_res->tx_comp_doorbell_paddr);
1078 
1079 	dp_info("paddr %pK vaddr %pK",
1080 		(void *)ipa_res->tx_comp_doorbell_paddr,
1081 		(void *)ipa_res->tx_comp_doorbell_vaddr);
1082 }
1083 
1084 #ifdef IPA_SET_RESET_TX_DB_PA
1085 static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
1086 					      struct dp_ipa_resources *ipa_res)
1087 {
1088 	hal_ring_handle_t wbm_srng =
1089 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1090 	qdf_dma_addr_t hp_addr;
1091 
1092 	if (!wbm_srng)
1093 		return QDF_STATUS_E_FAILURE;
1094 
1095 	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;
1096 
1097 	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);
1098 
1099 	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);
1100 
1101 	return QDF_STATUS_SUCCESS;
1102 }
1103 #endif /* IPA_SET_RESET_TX_DB_PA */
1104 
1105 #endif /* IPA_WDI3_TX_TWO_PIPES */
1106 
1107 /**
1108  * dp_tx_ipa_uc_detach - Free autonomy TX resources
1109  * @soc: data path instance
1110  * @pdev: core txrx pdev context
1111  *
1112  * Free allocated TX buffers with WBM SRNG
1113  *
1114  * Return: none
1115  */
1116 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1117 {
1118 	int idx;
1119 	qdf_nbuf_t nbuf;
1120 	struct dp_ipa_resources *ipa_res;
1121 
1122 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1123 		nbuf = (qdf_nbuf_t)
1124 			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
1125 		if (!nbuf)
1126 			continue;
1127 		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
1128 		qdf_mem_dp_tx_skb_cnt_dec();
1129 		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
1130 		qdf_nbuf_free(nbuf);
1131 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
1132 						(void *)NULL;
1133 	}
1134 
1135 	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1136 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1137 
1138 	ipa_res = &pdev->ipa_resource;
1139 	if (!ipa_res->is_db_ddr_mapped)
1140 		iounmap(ipa_res->tx_comp_doorbell_vaddr);
1141 
1142 	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
1143 	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
1144 }
1145 
1146 /**
1147  * dp_rx_ipa_uc_detach - free autonomy RX resources
1148  * @soc: data path instance
1149  * @pdev: core txrx pdev context
1150  *
1151  * This function will detach DP RX into main device context
1152  * will free DP Rx resources.
1153  *
1154  * Return: none
1155  */
1156 static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1157 {
1158 	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1159 
1160 	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
1161 	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
1162 }
1163 
1164 int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1165 {
1166 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1167 		return QDF_STATUS_SUCCESS;
1168 
1169 	/* TX resource detach */
1170 	dp_tx_ipa_uc_detach(soc, pdev);
1171 
1172 	/* Cleanup 2nd TX pipe resources */
1173 	dp_ipa_tx_alt_pool_detach(soc, pdev);
1174 
1175 	/* RX resource detach */
1176 	dp_rx_ipa_uc_detach(soc, pdev);
1177 
1178 	return QDF_STATUS_SUCCESS;	/* success */
1179 }
1180 
1181 /**
1182  * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
1183  * @soc: data path instance
1184  * @pdev: Physical device handle
1185  *
1186  * Allocate TX buffer from non-cacheable memory
1187  * Attache allocated TX buffers with WBM SRNG
1188  *
1189  * Return: int
1190  */
1191 static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1192 {
1193 	uint32_t tx_buffer_count;
1194 	uint32_t ring_base_align = 8;
1195 	qdf_dma_addr_t buffer_paddr;
1196 	struct hal_srng *wbm_srng = (struct hal_srng *)
1197 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1198 	struct hal_srng_params srng_params;
1199 	void *ring_entry;
1200 	int num_entries;
1201 	qdf_nbuf_t nbuf;
1202 	int retval = QDF_STATUS_SUCCESS;
1203 	int max_alloc_count = 0;
1204 
1205 	/*
1206 	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
1207 	 * unsigned int uc_tx_buf_sz =
1208 	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
1209 	 */
1210 	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
1211 	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
1212 
1213 	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
1214 			    &srng_params);
1215 	num_entries = srng_params.num_entries;
1216 
1217 	max_alloc_count =
1218 		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
1219 	if (max_alloc_count <= 0) {
1220 		dp_err("incorrect value for buffer count %u", max_alloc_count);
1221 		return -EINVAL;
1222 	}
1223 
1224 	dp_info("requested %d buffers to be posted to wbm ring",
1225 		max_alloc_count);
1226 
1227 	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
1228 		qdf_mem_malloc(num_entries *
1229 		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
1230 	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
1231 		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
1232 		return -ENOMEM;
1233 	}
1234 
1235 	hal_srng_access_start_unlocked(soc->hal_soc,
1236 				       hal_srng_to_hal_ring_handle(wbm_srng));
1237 
1238 	/*
1239 	 * Allocate Tx buffers as many as possible.
1240 	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
1241 	 * Populate Tx buffers into WBM2IPA ring
1242 	 * This initial buffer population will simulate H/W as source ring,
1243 	 * and update HP
1244 	 */
1245 	for (tx_buffer_count = 0;
1246 		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
1247 		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
1248 		if (!nbuf)
1249 			break;
1250 
1251 		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
1252 				hal_srng_to_hal_ring_handle(wbm_srng));
1253 		if (!ring_entry) {
1254 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1255 				  "%s: Failed to get WBM ring entry",
1256 				  __func__);
1257 			qdf_nbuf_free(nbuf);
1258 			break;
1259 		}
1260 
1261 		qdf_nbuf_map_single(soc->osdev, nbuf,
1262 				    QDF_DMA_BIDIRECTIONAL);
1263 		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
1264 		qdf_mem_dp_tx_skb_cnt_inc();
1265 		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));
1266 
1267 		/*
1268 		 * TODO - WCN7850 code can directly call the be handler
1269 		 * instead of hal soc ops.
1270 		 */
1271 		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
1272 					     buffer_paddr, 0,
1273 					     (IPA_TCL_DATA_RING_IDX +
1274 					      soc->wbm_sw0_bm_id));
1275 
1276 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
1277 			= (void *)nbuf;
1278 	}
1279 
1280 	hal_srng_access_end_unlocked(soc->hal_soc,
1281 				     hal_srng_to_hal_ring_handle(wbm_srng));
1282 
1283 	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
1284 
1285 	if (tx_buffer_count) {
1286 		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
1287 	} else {
1288 		dp_err("No IPA WDI TX buffer allocated!");
1289 		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1290 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1291 		retval = -ENOMEM;
1292 	}
1293 
1294 	return retval;
1295 }
1296 
1297 /**
1298  * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
1299  * @soc: data path instance
1300  * @pdev: core txrx pdev context
1301  *
1302  * This function will attach a DP RX instance into the main
1303  * device (SOC) context.
1304  *
1305  * Return: QDF_STATUS_SUCCESS: success
1306  *         QDF_STATUS_E_RESOURCES: Error return
1307  */
1308 static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1309 {
1310 	return QDF_STATUS_SUCCESS;
1311 }
1312 
1313 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1314 {
1315 	int error;
1316 
1317 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1318 		return QDF_STATUS_SUCCESS;
1319 
1320 	/* TX resource attach */
1321 	error = dp_tx_ipa_uc_attach(soc, pdev);
1322 	if (error) {
1323 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1324 			  "%s: DP IPA UC TX attach fail code %d",
1325 			  __func__, error);
1326 		return error;
1327 	}
1328 
1329 	/* Setup 2nd TX pipe */
1330 	error = dp_ipa_tx_alt_pool_attach(soc);
1331 	if (error) {
1332 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1333 			  "%s: DP IPA TX pool2 attach fail code %d",
1334 			  __func__, error);
1335 		dp_tx_ipa_uc_detach(soc, pdev);
1336 		return error;
1337 	}
1338 
1339 	/* RX resource attach */
1340 	error = dp_rx_ipa_uc_attach(soc, pdev);
1341 	if (error) {
1342 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1343 			  "%s: DP IPA UC RX attach fail code %d",
1344 			  __func__, error);
1345 		dp_ipa_tx_alt_pool_detach(soc, pdev);
1346 		dp_tx_ipa_uc_detach(soc, pdev);
1347 		return error;
1348 	}
1349 
1350 	return QDF_STATUS_SUCCESS;	/* success */
1351 }
1352 
1353 /*
1354  * dp_ipa_ring_resource_setup() - setup IPA ring resources
1355  * @soc: data path SoC handle
1356  *
1357  * Return: none
1358  */
1359 int dp_ipa_ring_resource_setup(struct dp_soc *soc,
1360 		struct dp_pdev *pdev)
1361 {
1362 	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
1363 	struct hal_srng *hal_srng;
1364 	struct hal_srng_params srng_params;
1365 	qdf_dma_addr_t hp_addr;
1366 	unsigned long addr_offset, dev_base_paddr;
1367 	uint32_t ix0;
1368 	uint8_t ix0_map[8];
1369 
1370 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1371 		return QDF_STATUS_SUCCESS;
1372 
1373 	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
1374 	hal_srng = (struct hal_srng *)
1375 			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
1376 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1377 			    hal_srng_to_hal_ring_handle(hal_srng),
1378 			    &srng_params);
1379 
1380 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
1381 		srng_params.ring_base_paddr;
1382 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
1383 		srng_params.ring_base_vaddr;
1384 	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
1385 		(srng_params.num_entries * srng_params.entry_size) << 2;
1386 	/*
1387 	 * For the register backed memory addresses, use the scn->mem_pa to
1388 	 * calculate the physical address of the shadow registers
1389 	 */
1390 	dev_base_paddr =
1391 		(unsigned long)
1392 		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
1393 	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
1394 		      (unsigned long)(hal_soc->dev_base_addr);
1395 	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
1396 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1397 
1398 	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1399 		(unsigned int)addr_offset,
1400 		(unsigned int)dev_base_paddr,
1401 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
1402 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
1403 		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
1404 		srng_params.num_entries,
1405 		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
1406 
1407 	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
1408 	hal_srng = (struct hal_srng *)
1409 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1410 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1411 			    hal_srng_to_hal_ring_handle(hal_srng),
1412 			    &srng_params);
1413 
1414 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
1415 						srng_params.ring_base_paddr;
1416 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
1417 						srng_params.ring_base_vaddr;
1418 	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
1419 		(srng_params.num_entries * srng_params.entry_size) << 2;
1420 	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
1421 		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
1422 				     hal_srng_to_hal_ring_handle(hal_srng));
1423 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
1424 		      (unsigned long)(hal_soc->dev_base_addr);
1425 	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
1426 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1427 
1428 	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
1429 		(unsigned int)addr_offset,
1430 		(unsigned int)dev_base_paddr,
1431 		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
1432 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
1433 		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
1434 		srng_params.num_entries,
1435 		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
1436 
1437 	dp_ipa_tx_alt_ring_resource_setup(soc);
1438 
1439 	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
1440 	hal_srng = (struct hal_srng *)
1441 			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
1442 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1443 			    hal_srng_to_hal_ring_handle(hal_srng),
1444 			    &srng_params);
1445 
1446 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
1447 						srng_params.ring_base_paddr;
1448 	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
1449 						srng_params.ring_base_vaddr;
1450 	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
1451 		(srng_params.num_entries * srng_params.entry_size) << 2;
1452 	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
1453 		      (unsigned long)(hal_soc->dev_base_addr);
1454 	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
1455 				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);
1456 
1457 	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1458 		(unsigned int)addr_offset,
1459 		(unsigned int)dev_base_paddr,
1460 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
1461 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
1462 		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
1463 		srng_params.num_entries,
1464 		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
1465 
1466 	hal_srng = (struct hal_srng *)
1467 			pdev->rx_refill_buf_ring2.hal_srng;
1468 	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
1469 			    hal_srng_to_hal_ring_handle(hal_srng),
1470 			    &srng_params);
1471 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
1472 		srng_params.ring_base_paddr;
1473 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
1474 		srng_params.ring_base_vaddr;
1475 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
1476 		(srng_params.num_entries * srng_params.entry_size) << 2;
1477 	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
1478 				       hal_srng_to_hal_ring_handle(hal_srng));
1479 	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
1480 		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);
1481 
1482 	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
1483 		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
1484 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
1485 		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
1486 		srng_params.num_entries,
1487 		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
1488 
1489 	/*
1490 	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
1491 	 * DESTINATION_RING_CTRL_IX_0.
1492 	 */
1493 	ix0_map[0] = REO_REMAP_TCL;
1494 	ix0_map[1] = REO_REMAP_SW1;
1495 	ix0_map[2] = REO_REMAP_SW2;
1496 	ix0_map[3] = REO_REMAP_SW3;
1497 	ix0_map[4] = REO_REMAP_SW2;
1498 	ix0_map[5] = REO_REMAP_RELEASE;
1499 	ix0_map[6] = REO_REMAP_FW;
1500 	ix0_map[7] = REO_REMAP_FW;
1501 
1502 	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
1503 				    ix0_map);
1504 
1505 	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);
1506 
1507 	return 0;
1508 }
1509 
/**
 * dp_ipa_get_resource() - Export DP ring memory info into pdev IPA resources
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Copies the ring base/size info captured by dp_ipa_ring_resource_setup()
 * into the pdev's ipa_resource as shared-mem descriptors, and records the
 * number of TX buffers posted to WBM.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the pdev is
 *	   invalid or any ring lacks a DMA address
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	/* TX transfer (TCL) and TX completion (WBM) rings */
	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* RX ready (REO dest) and RX refill rings */
	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/* All four rings must have a valid DMA address to hand to IPA */
	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	if (dp_ipa_tx_alt_ring_get_resource(pdev))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
1563 
/*
 * When IPA_SET_RESET_TX_DB_PA is defined the TX doorbell PA is managed
 * dynamically elsewhere, so the one-shot programming in
 * dp_ipa_set_doorbell_paddr() becomes a no-op; otherwise program it here.
 */
#ifdef IPA_SET_RESET_TX_DB_PA
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
		dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif
1570 
/**
 * dp_ipa_set_doorbell_paddr() - Map and program IPA doorbell addresses
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Maps the doorbell addresses for CPU/SMMU access, optionally programs
 * the TX doorbell (see DP_IPA_SET_TX_DB_PADDR) and points the REO dest
 * ring's HP doorbell at the IPA RX ready doorbell.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid pdev
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	dp_ipa_map_ring_doorbell_paddr(pdev);

	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring. It then updates the ring's Write/Head ptr and rings a
	 * doorbell to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
					  ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}
1604 
1605 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1606 			      uint8_t *op_msg)
1607 {
1608 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1609 	struct dp_pdev *pdev =
1610 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1611 
1612 	if (!pdev) {
1613 		dp_err("Invalid instance");
1614 		return QDF_STATUS_E_FAILURE;
1615 	}
1616 
1617 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
1618 		return QDF_STATUS_SUCCESS;
1619 
1620 	if (pdev->ipa_uc_op_cb) {
1621 		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
1622 	} else {
1623 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1624 		    "%s: IPA callback function is not registered", __func__);
1625 		qdf_mem_free(op_msg);
1626 		return QDF_STATUS_E_FAILURE;
1627 	}
1628 
1629 	return QDF_STATUS_SUCCESS;
1630 }
1631 
1632 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1633 				 ipa_uc_op_cb_type op_cb,
1634 				 void *usr_ctxt)
1635 {
1636 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1637 	struct dp_pdev *pdev =
1638 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1639 
1640 	if (!pdev) {
1641 		dp_err("Invalid instance");
1642 		return QDF_STATUS_E_FAILURE;
1643 	}
1644 
1645 	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
1646 		return QDF_STATUS_SUCCESS;
1647 
1648 	pdev->ipa_uc_op_cb = op_cb;
1649 	pdev->usr_ctxt = usr_ctxt;
1650 
1651 	return QDF_STATUS_SUCCESS;
1652 }
1653 
1654 void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1655 {
1656 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1657 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1658 
1659 	if (!pdev) {
1660 		dp_err("Invalid instance");
1661 		return;
1662 	}
1663 
1664 	dp_debug("Deregister OP handler callback");
1665 	pdev->ipa_uc_op_cb = NULL;
1666 	pdev->usr_ctxt = NULL;
1667 }
1668 
/* Placeholder: uC stats query not implemented yet (TBD) */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}
1674 
1675 /**
1676  * dp_tx_send_ipa_data_frame() - send IPA data frame
1677  * @soc_hdl: datapath soc handle
1678  * @vdev_id: id of the virtual device
1679  * @skb: skb to transmit
1680  *
1681  * Return: skb/ NULL is for success
1682  */
1683 qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1684 				     qdf_nbuf_t skb)
1685 {
1686 	qdf_nbuf_t ret;
1687 
1688 	/* Terminate the (single-element) list of tx frames */
1689 	qdf_nbuf_set_next(skb, NULL);
1690 	ret = dp_tx_send(soc_hdl, vdev_id, skb);
1691 	if (ret) {
1692 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1693 			  "%s: Failed to tx", __func__);
1694 		return ret;
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
/**
 * dp_ipa_enable_autonomy() - Remap REO destinations toward the IPA ring
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Rewrites REO DESTINATION_RING_CTRL_IX_0 (and IX_2/IX_3 when RX hash is
 * enabled) so traffic lands on SW4 — the REO2SW4 ring exported to IPA in
 * dp_ipa_ring_resource_setup().
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE on invalid pdev,
 *	   QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint8_t ix_map[8];

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Target must be up before touching REO registers */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix_map[0] = REO_REMAP_TCL;
	ix_map[1] = REO_REMAP_SW4;
	ix_map[2] = REO_REMAP_SW1;
	ix_map[3] = REO_REMAP_SW4;
	ix_map[4] = REO_REMAP_SW4;
	ix_map[5] = REO_REMAP_RELEASE;
	ix_map[6] = REO_REMAP_FW;
	ix_map[7] = REO_REMAP_FW;

	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		/* With RX hash on, send every hash bucket to SW4 as well */
		ix_map[0] = REO_REMAP_SW4;
		ix_map[1] = REO_REMAP_SW4;
		ix_map[2] = REO_REMAP_SW4;
		ix_map[3] = REO_REMAP_SW4;
		ix_map[4] = REO_REMAP_SW4;
		ix_map[5] = REO_REMAP_SW4;
		ix_map[6] = REO_REMAP_SW4;
		ix_map[7] = REO_REMAP_SW4;

		ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2,
					    ix_map);

		/* IX2 and IX3 get the same value; record for debugging */
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
1757 
/**
 * dp_ipa_disable_autonomy() - Restore default REO destination remap
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the physical device
 *
 * Reverts the remap programmed by dp_ipa_enable_autonomy(): IX_0 goes back
 * to the default table (same one set in dp_ipa_ring_resource_setup()) and,
 * when RX hash is enabled, IX_2/IX_3 come from dp_reo_remap_config().
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_FAILURE on invalid pdev,
 *	   QDF_STATUS_E_AGAIN when the target is not ready
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t ix0_map[8];
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Target must be up before touching REO registers */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Default destination table (matches dp_ipa_ring_resource_setup()) */
	ix0_map[0] = REO_REMAP_TCL;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}
1806 
1807 /* This should be configurable per H/W configuration enable status */
1808 #define L3_HEADER_PADDING	2
1809 
1810 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
1811 	defined(CONFIG_IPA_WDI_UNIFIED_API)
1812 
1813 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
1814 static inline void dp_setup_mcc_sys_pipes(
1815 		qdf_ipa_sys_connect_params_t *sys_in,
1816 		qdf_ipa_wdi_conn_in_params_t *pipe_in)
1817 {
1818 	/* Setup MCC sys pipe */
1819 	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
1820 			DP_IPA_MAX_IFACE;
1821 	for (int i = 0; i < DP_IPA_MAX_IFACE; i++)
1822 		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
1823 		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
1824 }
1825 #else
/* QCA_LL_TX_FLOW_CONTROL_V2 build: no MCC sys pipes are requested */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
1832 #endif
1833 
/**
 * dp_ipa_wdi_tx_params() - Fill WDI TX pipe setup info for the connect call
 * @soc: data path SoC handle
 * @ipa_res: pdev IPA resources (ring mem info, buffer count)
 * @tx: TX pipe setup info to populate
 * @over_gsi: true when the pipe runs over GSI (selects the WLAN2 client)
 *
 * Exports the WBM TX comp ring as the transfer ring and the TCL ring as
 * the event ring, and pre-programs the TCL descriptor template IPA will
 * prepend to each TX packet.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring: WBM TX completion ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring: TCL data ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
#ifndef DP_BE_WAR
	/* TODO - WCN7850 does not have these fields */
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
#endif
	/* Descriptor body starts right after the TLV header */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM(soc->wbm_sw0_bm_id);
#ifndef DP_BE_WAR
	/* TODO - WCN7850 does not have these fields */
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
#endif
}
1896 
/**
 * dp_ipa_wdi_rx_params() - Populate IPA WDI Rx pipe setup info (non-SMMU)
 * @soc: DP SoC handle
 * @ipa_res: per-pdev IPA resources (REO/refill ring info)
 * @rx: IPA WDI Rx pipe setup info to fill
 * @over_gsi: true selects the GSI IPA client (WLAN2_PROD), else WLAN1_PROD
 *
 * Transfer ring is the REO destination ring (doorbell = REO tail pointer);
 * event ring is the FW Rx refill ring (doorbell = FW head pointer, which is
 * not a PCIe address). Packet payload starts after the Rx TLVs plus the
 * L3 alignment padding.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Skip Rx TLV header plus L3 alignment padding to reach payload */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
1936 
/**
 * dp_ipa_wdi_tx_smmu_params() - Populate IPA WDI Tx pipe setup info (SMMU)
 * @soc: DP SoC handle
 * @ipa_res: per-pdev IPA resources (ring sgtables, buffer count)
 * @tx_smmu: IPA WDI SMMU Tx pipe setup info to fill
 * @over_gsi: true selects the GSI IPA client (WLAN2_CONS), else WLAN1_CONS
 *
 * SMMU variant of dp_ipa_wdi_tx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses so the IPA SMMU can
 * map them. Also preprograms the TCL data-command descriptor template.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
#ifndef DP_BE_WAR
	/* TODO - WCN7850 does not have these fields */
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
#endif
	/* Descriptor body follows the TLV header in the template */
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM(soc->wbm_sw0_bm_id);
#ifndef DP_BE_WAR
	/* TODO - WCN7850 does not have these fields */
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
#endif
}
1999 
/**
 * dp_ipa_wdi_rx_smmu_params() - Populate IPA WDI Rx pipe setup info (SMMU)
 * @soc: DP SoC handle
 * @ipa_res: per-pdev IPA resources (ring sgtables)
 * @rx_smmu: IPA WDI SMMU Rx pipe setup info to fill
 * @over_gsi: true selects the GSI IPA client (WLAN2_PROD), else WLAN1_PROD
 *
 * SMMU variant of dp_ipa_wdi_rx_params(): ring bases are passed as
 * scatter-gather tables instead of physical addresses so the IPA SMMU can
 * map them.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Skip Rx TLV header plus L3 alignment padding to reach payload */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2039 
/**
 * dp_ipa_setup() - Setup and connect IPA WDI pipes (unified WDI API)
 * @soc_hdl: CDP SoC handle
 * @pdev_id: id of the DP pdev whose IPA resources are used
 * @ipa_i2w_cb: IPA-to-WLAN callback (unused on this path)
 * @ipa_w2i_cb: WLAN-to-IPA Rx notify callback registered with IPA
 * @ipa_wdi_meter_notifier_cb: metering callback (unused on this path)
 * @ipa_desc_size: IPA descriptor size (unused on this path)
 * @ipa_priv: opaque context handed back in @ipa_w2i_cb
 * @is_rm_enabled: is IPA RM enabled (unused on this path)
 * @tx_pipe_handle: Tx pipe handle out param (unused on this path)
 * @rx_pipe_handle: Rx pipe handle out param (unused on this path)
 * @is_smmu_enabled: select SMMU (sgtable) vs physical-address pipe params
 * @sys_in: sys pipe parameters for the MCC case
 * @over_gsi: pipes run over GSI; selects the IPA client ids
 *
 * Fills Tx/Rx (or Tx/Rx SMMU) pipe setup info, connects the WDI pipes via
 * qdf_ipa_wdi_conn_pipes() and records the returned uC doorbell addresses.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	qdf_ipa_ep_cfg_t *tx_cfg;
	qdf_ipa_ep_cfg_t *rx_cfg;
	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
	/* pipe_in is large; heap-allocate it rather than using the stack */
	qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
	qdf_ipa_wdi_conn_out_params_t pipe_out;
	int ret;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
	if (!pipe_in)
		return QDF_STATUS_E_NOMEM;

	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	if (is_smmu_enabled)
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
	else
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;

	dp_setup_mcc_sys_pipes(sys_in, pipe_in);

	/* TX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
	} else {
		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
	}

	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;

	/**
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
	else
		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);

	dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);

	/* RX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
	} else {
		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
	}

	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;

	/**
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
	else
		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);

	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;

	/* Connect WDI IPA PIPEs */
	ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		qdf_mem_free(pipe_in);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	dp_ipa_set_pipe_db(ipa_res, &pipe_out);

	ipa_res->is_db_ddr_mapped =
		QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);

	/* Tx comp ring HP must be initialized on first doorbell access */
	soc->ipa_first_tx_db_access = true;
	qdf_mem_free(pipe_in);

	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
	soc->ipa_rx_buf_map_lock_initialized = true;

	return QDF_STATUS_SUCCESS;
}
2176 
2177 /**
2178  * dp_ipa_setup_iface() - Setup IPA header and register interface
2179  * @ifname: Interface name
2180  * @mac_addr: Interface MAC address
2181  * @prod_client: IPA prod client type
2182  * @cons_client: IPA cons client type
2183  * @session_id: Session ID
2184  * @is_ipv6_enabled: Is IPV6 enabled or not
2185  *
2186  * Return: QDF_STATUS
2187  */
2188 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
2189 			      qdf_ipa_client_type_t prod_client,
2190 			      qdf_ipa_client_type_t cons_client,
2191 			      uint8_t session_id, bool is_ipv6_enabled)
2192 {
2193 	qdf_ipa_wdi_reg_intf_in_params_t in;
2194 	qdf_ipa_wdi_hdr_info_t hdr_info;
2195 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
2196 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
2197 	int ret = -EINVAL;
2198 
2199 	qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
2200 
2201 	dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
2202 		 QDF_MAC_ADDR_REF(mac_addr));
2203 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2204 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
2205 
2206 	/* IPV4 header */
2207 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
2208 
2209 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
2210 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2211 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
2212 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2213 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2214 
2215 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
2216 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
2217 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2218 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
2219 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
2220 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
2221 	dp_ipa_setup_iface_session_id(&in, session_id);
2222 
2223 	/* IPV6 header */
2224 	if (is_ipv6_enabled) {
2225 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
2226 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
2227 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
2228 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
2229 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
2230 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2231 	}
2232 
2233 	dp_debug("registering for session_id: %u", session_id);
2234 
2235 	ret = qdf_ipa_wdi_reg_intf(&in);
2236 
2237 	if (ret) {
2238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2239 		    "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
2240 		    __func__, ret);
2241 		return QDF_STATUS_E_FAILURE;
2242 	}
2243 
2244 	return QDF_STATUS_SUCCESS;
2245 }
2246 
2247 #else /* !CONFIG_IPA_WDI_UNIFIED_API */
2248 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2249 			void *ipa_i2w_cb, void *ipa_w2i_cb,
2250 			void *ipa_wdi_meter_notifier_cb,
2251 			uint32_t ipa_desc_size, void *ipa_priv,
2252 			bool is_rm_enabled, uint32_t *tx_pipe_handle,
2253 			uint32_t *rx_pipe_handle)
2254 {
2255 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2256 	struct dp_pdev *pdev =
2257 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2258 	struct dp_ipa_resources *ipa_res;
2259 	qdf_ipa_wdi_pipe_setup_info_t *tx;
2260 	qdf_ipa_wdi_pipe_setup_info_t *rx;
2261 	qdf_ipa_wdi_conn_in_params_t pipe_in;
2262 	qdf_ipa_wdi_conn_out_params_t pipe_out;
2263 	struct tcl_data_cmd *tcl_desc_ptr;
2264 	uint8_t *desc_addr;
2265 	uint32_t desc_size;
2266 	int ret;
2267 
2268 	if (!pdev) {
2269 		dp_err("Invalid instance");
2270 		return QDF_STATUS_E_FAILURE;
2271 	}
2272 
2273 	ipa_res = &pdev->ipa_resource;
2274 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2275 		return QDF_STATUS_SUCCESS;
2276 
2277 	qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2278 	qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
2279 	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
2280 	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
2281 
2282 	/* TX PIPE */
2283 	/**
2284 	 * Transfer Ring: WBM Ring
2285 	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
2286 	 * Event Ring: TCL ring
2287 	 * Event Ring Doorbell PA: TCL Head Pointer Address
2288 	 */
2289 	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
2290 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
2291 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2292 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
2293 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
2294 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
2295 	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
2296 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
2297 	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
2298 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
2299 		ipa_res->tx_comp_ring_base_paddr;
2300 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
2301 		ipa_res->tx_comp_ring_size;
2302 	/* WBM Tail Pointer Address */
2303 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
2304 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
2305 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
2306 		ipa_res->tx_ring_base_paddr;
2307 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
2308 	/* TCL Head Pointer Address */
2309 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
2310 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
2311 	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
2312 		ipa_res->tx_num_alloc_buffer;
2313 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
2314 
2315 	/* Preprogram TCL descriptor */
2316 	desc_addr =
2317 		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
2318 	desc_size = sizeof(struct tcl_data_cmd);
2319 	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
2320 	tcl_desc_ptr = (struct tcl_data_cmd *)
2321 		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
2322 	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
2323 						HAL_RX_BUF_RBM_SW2_BM;
2324 	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
2325 	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
2326 	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
2327 
2328 	/* RX PIPE */
2329 	/**
2330 	 * Transfer Ring: REO Ring
2331 	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
2332 	 * Event Ring: FW ring
2333 	 * Event Ring Doorbell PA: FW Head Pointer Address
2334 	 */
2335 	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
2336 	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
2337 	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
2338 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
2339 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
2340 	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
2341 	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
2342 	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
2343 	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
2344 	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
2345 	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
2346 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
2347 						ipa_res->rx_rdy_ring_base_paddr;
2348 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
2349 						ipa_res->rx_rdy_ring_size;
2350 	/* REO Tail Pointer Address */
2351 	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
2352 					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
2353 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
2354 					ipa_res->rx_refill_ring_base_paddr;
2355 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
2356 						ipa_res->rx_refill_ring_size;
2357 	/* FW Head Pointer Address */
2358 	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
2359 				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
2360 	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size +
2361 						L3_HEADER_PADDING;
2362 	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
2363 	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
2364 
2365 	/* Connect WDI IPA PIPE */
2366 	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
2367 	if (ret) {
2368 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2369 			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
2370 			  __func__, ret);
2371 		return QDF_STATUS_E_FAILURE;
2372 	}
2373 
2374 	/* IPA uC Doorbell registers */
2375 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2376 		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
2377 		  __func__,
2378 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
2379 		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
2380 
2381 	ipa_res->tx_comp_doorbell_paddr =
2382 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
2383 	ipa_res->tx_comp_doorbell_vaddr =
2384 		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
2385 	ipa_res->rx_ready_doorbell_paddr =
2386 		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
2387 
2388 	soc->ipa_first_tx_db_access = true;
2389 
2390 	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
2391 	soc->ipa_rx_buf_map_lock_initialized = true;
2392 
2393 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2394 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
2395 		  __func__,
2396 		  "transfer_ring_base_pa",
2397 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
2398 		  "transfer_ring_size",
2399 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
2400 		  "transfer_ring_doorbell_pa",
2401 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
2402 		  "event_ring_base_pa",
2403 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
2404 		  "event_ring_size",
2405 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
2406 		  "event_ring_doorbell_pa",
2407 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
2408 		  "num_pkt_buffers",
2409 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
2410 		  "tx_comp_doorbell_paddr",
2411 		  (void *)ipa_res->tx_comp_doorbell_paddr);
2412 
2413 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2414 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
2415 		  __func__,
2416 		  "transfer_ring_base_pa",
2417 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
2418 		  "transfer_ring_size",
2419 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
2420 		  "transfer_ring_doorbell_pa",
2421 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
2422 		  "event_ring_base_pa",
2423 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
2424 		  "event_ring_size",
2425 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
2426 		  "event_ring_doorbell_pa",
2427 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
2428 		  "num_pkt_buffers",
2429 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
2430 		  "tx_comp_doorbell_paddr",
2431 		  (void *)ipa_res->rx_ready_doorbell_paddr);
2432 
2433 	return QDF_STATUS_SUCCESS;
2434 }
2435 
2436 /**
2437  * dp_ipa_setup_iface() - Setup IPA header and register interface
2438  * @ifname: Interface name
2439  * @mac_addr: Interface MAC address
2440  * @prod_client: IPA prod client type
2441  * @cons_client: IPA cons client type
2442  * @session_id: Session ID
2443  * @is_ipv6_enabled: Is IPV6 enabled or not
2444  *
2445  * Return: QDF_STATUS
2446  */
2447 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
2448 			      qdf_ipa_client_type_t prod_client,
2449 			      qdf_ipa_client_type_t cons_client,
2450 			      uint8_t session_id, bool is_ipv6_enabled)
2451 {
2452 	qdf_ipa_wdi_reg_intf_in_params_t in;
2453 	qdf_ipa_wdi_hdr_info_t hdr_info;
2454 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
2455 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
2456 	int ret = -EINVAL;
2457 
2458 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2459 		  "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
2460 		  __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
2461 
2462 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2463 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
2464 
2465 	/* IPV4 header */
2466 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
2467 
2468 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
2469 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2470 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
2471 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
2472 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
2473 
2474 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
2475 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
2476 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2477 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
2478 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
2479 		htonl(session_id << 16);
2480 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
2481 
2482 	/* IPV6 header */
2483 	if (is_ipv6_enabled) {
2484 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
2485 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
2486 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
2487 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
2488 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
2489 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
2490 	}
2491 
2492 	ret = qdf_ipa_wdi_reg_intf(&in);
2493 	if (ret) {
2494 		dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d",
2495 		       ret);
2496 		return QDF_STATUS_E_FAILURE;
2497 	}
2498 
2499 	return QDF_STATUS_SUCCESS;
2500 }
2501 
2502 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
2503 
2504 /**
2505  * dp_ipa_cleanup() - Disconnect IPA pipes
2506  * @soc_hdl: dp soc handle
2507  * @pdev_id: dp pdev id
2508  * @tx_pipe_handle: Tx pipe handle
2509  * @rx_pipe_handle: Rx pipe handle
2510  *
2511  * Return: QDF_STATUS
2512  */
2513 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2514 			  uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
2515 {
2516 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2517 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2518 	struct dp_pdev *pdev;
2519 	int ret;
2520 
2521 	ret = qdf_ipa_wdi_disconn_pipes();
2522 	if (ret) {
2523 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
2524 		       ret);
2525 		status = QDF_STATUS_E_FAILURE;
2526 	}
2527 
2528 	if (soc->ipa_rx_buf_map_lock_initialized) {
2529 		qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
2530 		soc->ipa_rx_buf_map_lock_initialized = false;
2531 	}
2532 
2533 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2534 	if (qdf_unlikely(!pdev)) {
2535 		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
2536 		status = QDF_STATUS_E_FAILURE;
2537 		goto exit;
2538 	}
2539 
2540 	dp_ipa_unmap_ring_doorbell_paddr(pdev);
2541 exit:
2542 	return status;
2543 }
2544 
2545 /**
2546  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
2547  * @ifname: Interface name
2548  * @is_ipv6_enabled: Is IPV6 enabled or not
2549  *
2550  * Return: QDF_STATUS
2551  */
2552 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
2553 {
2554 	int ret;
2555 
2556 	ret = qdf_ipa_wdi_dereg_intf(ifname);
2557 	if (ret) {
2558 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2559 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
2560 			  __func__, ret);
2561 		return QDF_STATUS_E_FAILURE;
2562 	}
2563 
2564 	return QDF_STATUS_SUCCESS;
2565 }
2566 
#ifdef IPA_SET_RESET_TX_DB_PA
/* Program/clear the IPA Tx comp doorbell physical address so IPA rings the
 * Tx doorbell directly when supported.
 */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
				dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
				dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
#else
/* No-op stubs when Tx doorbell PA set/reset is not supported */
#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
#endif
2576 
2577 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2578 {
2579 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2580 	struct dp_pdev *pdev =
2581 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2582 	struct dp_ipa_resources *ipa_res;
2583 	QDF_STATUS result;
2584 
2585 	if (!pdev) {
2586 		dp_err("Invalid instance");
2587 		return QDF_STATUS_E_FAILURE;
2588 	}
2589 
2590 	ipa_res = &pdev->ipa_resource;
2591 
2592 	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
2593 	DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
2594 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
2595 
2596 	result = qdf_ipa_wdi_enable_pipes();
2597 	if (result) {
2598 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2599 			  "%s: Enable WDI PIPE fail, code %d",
2600 			  __func__, result);
2601 		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
2602 		DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
2603 		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
2604 		return QDF_STATUS_E_FAILURE;
2605 	}
2606 
2607 	if (soc->ipa_first_tx_db_access) {
2608 		dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
2609 		soc->ipa_first_tx_db_access = false;
2610 	}
2611 
2612 	return QDF_STATUS_SUCCESS;
2613 }
2614 
2615 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2616 {
2617 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2618 	struct dp_pdev *pdev =
2619 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2620 	QDF_STATUS result;
2621 	struct dp_ipa_resources *ipa_res;
2622 
2623 	if (!pdev) {
2624 		dp_err("Invalid instance");
2625 		return QDF_STATUS_E_FAILURE;
2626 	}
2627 
2628 	ipa_res = &pdev->ipa_resource;
2629 
2630 	qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
2631 	/*
2632 	 * Reset the tx completion doorbell address before invoking IPA disable
2633 	 * pipes API to ensure that there is no access to IPA tx doorbell
2634 	 * address post disable pipes.
2635 	 */
2636 	DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
2637 
2638 	result = qdf_ipa_wdi_disable_pipes();
2639 	if (result) {
2640 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2641 			  "%s: Disable WDI PIPE fail, code %d",
2642 			  __func__, result);
2643 		qdf_assert_always(0);
2644 		return QDF_STATUS_E_FAILURE;
2645 	}
2646 
2647 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
2648 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
2649 
2650 	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
2651 }
2652 
2653 /**
2654  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
2655  * @client: Client type
2656  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
2657  *
2658  * Return: QDF_STATUS
2659  */
2660 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
2661 {
2662 	qdf_ipa_wdi_perf_profile_t profile;
2663 	QDF_STATUS result;
2664 
2665 	profile.client = client;
2666 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
2667 
2668 	result = qdf_ipa_wdi_set_perf_profile(&profile);
2669 	if (result) {
2670 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2671 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
2672 			  __func__, result);
2673 		return QDF_STATUS_E_FAILURE;
2674 	}
2675 
2676 	return QDF_STATUS_SUCCESS;
2677 }
2678 
2679 /**
2680  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
2681  * @pdev: pdev
2682  * @vdev: vdev
2683  * @nbuf: skb
2684  *
2685  * Return: nbuf if TX fails and NULL if TX succeeds
2686  */
2687 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
2688 				       struct dp_vdev *vdev,
2689 				       qdf_nbuf_t nbuf)
2690 {
2691 	struct dp_peer *vdev_peer;
2692 	uint16_t len;
2693 
2694 	vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
2695 	if (qdf_unlikely(!vdev_peer))
2696 		return nbuf;
2697 
2698 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2699 	len = qdf_nbuf_len(nbuf);
2700 
2701 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
2702 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
2703 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
2704 		return nbuf;
2705 	}
2706 
2707 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
2708 	dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
2709 	return NULL;
2710 }
2711 
/**
 * dp_ipa_rx_intrabss_fwd() - Attempt intra-BSS forwarding of an IPA RX frame
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id the frame was received on
 * @nbuf: received frame (allocated by the IPA driver)
 * @fwd_success: out param, set true when the frame (or its copy) was
 *               successfully queued for transmit
 *
 * Return: true if @nbuf was consumed here (forwarded or freed), false if
 *         the caller should deliver @nbuf up to the network stack
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	struct dp_peer *da_peer;
	struct dp_peer *sa_peer;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/*
		 * Broadcast/multicast: transmit a copy back into the BSS
		 * while the original frame still goes up to the stack.
		 */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frames addressed to the vdev itself go up the stack, not back out */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Forward only when both DA and SA are known peers on this vdev;
	 * the refs are dropped immediately — these are existence checks.
	 */
	da_peer = dp_peer_find_hash_find(soc, eh->h_dest, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!da_peer)
		goto out;

	dp_peer_unref_delete(da_peer, DP_MOD_ID_IPA);

	sa_peer = dp_peer_find_hash_find(soc, eh->h_source, 0, vdev->vdev_id,
					 DP_MOD_ID_IPA);
	if (!sa_peer)
		goto out;

	dp_peer_unref_delete(sa_peer, DP_MOD_ID_IPA);

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
2796 
/* Compile-time platform check: true only for MDM builds */
bool dp_ipa_is_mdm_platform(void)
{
#ifdef MDM_PLATFORM
	return true;
#else
	return false;
#endif
}
2808 
2809 /**
2810  * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
2811  * @soc: soc
2812  * @nbuf: source skb
2813  *
2814  * Return: new nbuf if success and otherwise NULL
2815  */
2816 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
2817 					     qdf_nbuf_t nbuf)
2818 {
2819 	uint8_t *src_nbuf_data;
2820 	uint8_t *dst_nbuf_data;
2821 	qdf_nbuf_t dst_nbuf;
2822 	qdf_nbuf_t temp_nbuf = nbuf;
2823 	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
2824 	bool is_nbuf_head = true;
2825 	uint32_t copy_len = 0;
2826 
2827 	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
2828 				  RX_BUFFER_RESERVATION,
2829 				  RX_DATA_BUFFER_ALIGNMENT, FALSE);
2830 
2831 	if (!dst_nbuf) {
2832 		dp_err_rl("nbuf allocate fail");
2833 		return NULL;
2834 	}
2835 
2836 	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
2837 		qdf_nbuf_free(dst_nbuf);
2838 		dp_err_rl("nbuf is jumbo data");
2839 		return NULL;
2840 	}
2841 
2842 	/* prepeare to copy all data into new skb */
2843 	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
2844 	while (temp_nbuf) {
2845 		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
2846 		/* first head nbuf */
2847 		if (is_nbuf_head) {
2848 			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
2849 				     soc->rx_pkt_tlv_size);
2850 			/* leave extra 2 bytes L3_HEADER_PADDING */
2851 			dst_nbuf_data += (soc->rx_pkt_tlv_size +
2852 					  L3_HEADER_PADDING);
2853 			src_nbuf_data += soc->rx_pkt_tlv_size;
2854 			copy_len = qdf_nbuf_headlen(temp_nbuf) -
2855 						soc->rx_pkt_tlv_size;
2856 			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
2857 			is_nbuf_head = false;
2858 		} else {
2859 			copy_len = qdf_nbuf_len(temp_nbuf);
2860 			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
2861 		}
2862 		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
2863 		dst_nbuf_data += copy_len;
2864 	}
2865 
2866 	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
2867 	/* copy is done, free original nbuf */
2868 	qdf_nbuf_free(nbuf);
2869 
2870 	return dst_nbuf;
2871 }
2872 
2873 /**
2874  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
2875  * @soc: soc
2876  * @nbuf: skb
2877  *
2878  * Return: nbuf if success and otherwise NULL
2879  */
2880 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
2881 {
2882 
2883 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2884 		return nbuf;
2885 
2886 	/* WLAN IPA is run-time disabled */
2887 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
2888 		return nbuf;
2889 
2890 	if (!qdf_nbuf_is_frag(nbuf))
2891 		return nbuf;
2892 
2893 	/* linearize skb for IPA */
2894 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
2895 }
2896 
2897 QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
2898 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2899 {
2900 	QDF_STATUS ret;
2901 
2902 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2903 	struct dp_pdev *pdev =
2904 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2905 
2906 	if (!pdev) {
2907 		dp_err("%s invalid instance", __func__);
2908 		return QDF_STATUS_E_FAILURE;
2909 	}
2910 
2911 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
2912 		dp_debug("SMMU S1 disabled");
2913 		return QDF_STATUS_SUCCESS;
2914 	}
2915 	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
2916 	if (ret)
2917 		return ret;
2918 
2919 	ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true);
2920 	if (ret)
2921 		__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);
2922 
2923 	return ret;
2924 }
2925 
2926 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
2927 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2928 {
2929 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2930 	struct dp_pdev *pdev =
2931 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2932 
2933 	if (!pdev) {
2934 		dp_err("%s invalid instance", __func__);
2935 		return QDF_STATUS_E_FAILURE;
2936 	}
2937 
2938 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
2939 		dp_debug("SMMU S1 disabled");
2940 		return QDF_STATUS_SUCCESS;
2941 	}
2942 
2943 	if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false) ||
2944 	    dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false))
2945 		return QDF_STATUS_E_FAILURE;
2946 
2947 	return QDF_STATUS_SUCCESS;
2948 }
2949 
2950 #endif
2951