/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef IPA_OFFLOAD

#include <qdf_ipa_wdi3.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_htt.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "dp_ipa.h"

/* Ring index for WBM2SW2 release ring */
#define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX

/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)

/*
 * WAR for the IPA_OFFLOAD case: in some scenarios it's observed that WBM
 * tries to release a buffer into the WBM2SW RELEASE ring for IPA while the
 * ring is full. The resulting back pressure leads to a FW crash. Leaving
 * some entries with no buffer attached keeps WBM able to write to the ring,
 * and ring dumps then reveal which buffer is causing the issue.
 */
#define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: timestamp at which the record was added
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;
	uint32_t ix0_reg;
	uint32_t ix2_reg;
	uint32_t ix3_reg;
};

#define REO_REMAP_HISTORY_SIZE 32

struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];

static qdf_atomic_t dp_ipa_reo_remap_history_index;
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

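	/*
	 * qdf_atomic_inc_return() hands each caller a unique value, so
	 * exactly one caller observes next == REO_REMAP_HISTORY_SIZE and
	 * rebases the shared counter; the modulo below keeps any index
	 * handed out in the meantime within array bounds.
	 */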
	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	return next % REO_REMAP_HISTORY_SIZE;
}

/**
 * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
 * @ix0_val: reo destination ring IX0 value
 * @ix2_val: reo destination ring IX2 value
 * @ix3_val: reo destination ring IX3 value
 *
 * Return: None
 */
static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
					 uint32_t ix3_val)
{
	int idx = dp_ipa_reo_remap_record_index_next(
				&dp_ipa_reo_remap_history_index);
	struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->ix0_reg = ix0_val;
	record->ix2_reg = ix2_val;
	record->ix3_reg = ix3_val;
}

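/*
 * dp_ipa_reo_remap_history_dump() - editor's illustrative sketch (not in
 * the original driver) of how the circular remap history above could be
 * read back oldest-to-newest, e.g. from a debugfs hook or a crash handler.
 * Assumes the kernel's __maybe_unused attribute macro is visible here.
 */
static void __maybe_unused dp_ipa_reo_remap_history_dump(void)
{
	int start = qdf_atomic_read(&dp_ipa_reo_remap_history_index) %
		    REO_REMAP_HISTORY_SIZE;
	int i;

	for (i = 0; i < REO_REMAP_HISTORY_SIZE; i++) {
		struct dp_ipa_reo_remap_record *rec =
			&dp_ipa_reo_remap_history[(start + i) %
						  REO_REMAP_HISTORY_SIZE];

		if (!rec->timestamp)
			continue;	/* slot never written */

		dp_info("ts=%llu ix0=0x%x ix2=0x%x ix3=0x%x",
			(unsigned long long)rec->timestamp,
			rec->ix0_reg, rec->ix2_reg, rec->ix3_reg);
	}
}
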
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create)
{
	qdf_mem_info_t mem_map_table = {0};

	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create)
		return qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
	else
		return qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);
}

QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create)
{
	struct dp_pdev *pdev;
	int i;

	for (i = 0; i < soc->pdev_count; i++) {
		pdev = soc->pdev_list[i];
		if (pdev && pdev->monitor_configured)
			return QDF_STATUS_SUCCESS;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
	    !qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	/*
	 * Even if the IPA pipes are disabled, if this is an unmap
	 * operation and the nbuf was IPA-SMMU-mapped earlier, do the
	 * IPA SMMU unmap as well.
	 */
	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
		} else {
			return QDF_STATUS_SUCCESS;
		}
	}

	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
		if (create) {
			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
		} else {
			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
		}
		return QDF_STATUS_E_INVAL;
	}

	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
}

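/*
 * Editor's sketch of a typical call site (hypothetical; the real call
 * sites live in the Rx buffer replenish/free paths): map when an Rx
 * buffer is handed to hardware, unmap before it is freed.
 *
 *	if (qdf_mem_smmu_s1_enabled(soc->osdev))
 *		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
 *						  rx_desc_pool->buf_size,
 *						  true);
 */
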
static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
	struct dp_soc *soc,
	struct dp_pdev *pdev,
	bool create)
{
	uint32_t index;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
	qdf_nbuf_t nbuf;
	uint32_t buf_len;

	if (!qdf_ipa_is_ready()) {
		dp_info("IPA is not READY");
		return QDF_STATUS_SUCCESS;
	}

	for (index = 0; index < tx_buffer_cnt; index++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
		if (!nbuf)
			continue;
		buf_len = qdf_nbuf_get_data_len(nbuf);
		ret = __dp_ipa_handle_buf_smmu_mapping(
				soc, nbuf, buf_len, create);
		qdf_assert_always(!ret);
	}

	return ret;
}

#ifdef RX_DESC_MULTI_PAGE_ALLOC
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	qdf_nbuf_t nbuf;
	int i;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	for (i = 0; i < rx_pool->pool_size; i++) {
		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
		    rx_pool->array[i].rx_desc.unmapped)
			continue;

		nbuf = rx_pool->array[i].rx_desc.nbuf;

		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						 rx_pool->buf_size, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return QDF_STATUS_SUCCESS;
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/**
 * dp_tx_ipa_uc_detach() - Free autonomous TX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Free the TX buffers that were allocated and attached to the WBM SRNG.
 *
 * Return: none
 */
static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int idx;
	qdf_nbuf_t nbuf;
	struct dp_ipa_resources *ipa_res;

	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		nbuf = (qdf_nbuf_t)
			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
		if (!nbuf)
			continue;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
		qdf_mem_dp_tx_skb_cnt_dec();
		qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_data_len(nbuf));
		qdf_nbuf_free(nbuf);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
						(void *)NULL;
	}

	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;

	ipa_res = &pdev->ipa_resource;
	if (!ipa_res->is_db_ddr_mapped)
		iounmap(ipa_res->tx_comp_doorbell_vaddr);

	qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
}

/**
 * dp_rx_ipa_uc_detach() - Free autonomous RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function detaches DP RX from the main device context and frees
 * the DP RX resources.
 *
 * Return: none
 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
}

int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_tx_ipa_uc_attach() - Allocate autonomous TX resources
 * @soc: data path instance
 * @pdev: physical device handle
 *
 * Allocate TX buffers from non-cacheable memory and attach them to the
 * WBM SRNG.
 *
 * Return: 0 on success, error code on failure
 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate as many Tx buffers as possible, leaving
	 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES entries empty, and
	 * populate them into the WBM2IPA ring. This initial buffer
	 * population simulates H/W as the source ring and updates the HP.
	 */
	for (tx_buffer_count = 0;
		tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_data_len(nbuf));

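		/*
		 * Split the 37-bit DMA address into the low 32 bits and
		 * the upper 5 bits (mask 0x1f << 32) expected by the
		 * ring entry's paddr_lo/paddr_hi fields.
		 */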
		paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff);
		paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32;
		HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo);
		HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi);
		HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX +
				      HAL_WBM_SW0_BM_ID));

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}

/**
 * dp_rx_ipa_uc_attach() - Allocate autonomous RX resources
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * This function attaches a DP RX instance to the main device (SOC)
 * context. It is currently a no-op.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES on error
 */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int error;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource attach */
	error = dp_tx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC TX attach fail code %d",
			  __func__, error);
		return error;
	}

	/* RX resource attach */
	error = dp_rx_ipa_uc_attach(soc, pdev);
	if (error) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: DP IPA UC RX attach fail code %d",
			  __func__, error);
		dp_tx_ipa_uc_detach(soc, pdev);
		return error;
	}

	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: physical device handle
 *
 * Return: 0 on success
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
		struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
						srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
						srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
				(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	return 0;
}

static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
					     qdf_shared_mem_t *shared_mem,
					     void *cpu_addr,
					     qdf_dma_addr_t dma_addr,
					     uint32_t size)
{
	qdf_dma_addr_t paddr;
	int ret;

	shared_mem->vaddr = cpu_addr;
	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	*qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;

	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);

	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		dp_err("Unable to get DMA sgtable");
		return QDF_STATUS_E_NOMEM;
	}

	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	ipa_res->tx_num_alloc_buffer =
		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
				   soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
				   soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	dp_ipa_get_shared_mem_info(
			soc->osdev, &ipa_res->rx_refill_ring,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
			soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->tx_comp_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
	    !qdf_mem_get_dma_addr(soc->osdev,
				  &ipa_res->rx_refill_ring.mem_info))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng *reo_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	uint32_t tx_comp_doorbell_dmaaddr;
	uint32_t rx_ready_doorbell_dmaaddr;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		pld_smmu_map(soc->osdev->dev, ipa_res->tx_comp_doorbell_paddr,
			     &tx_comp_doorbell_dmaaddr, sizeof(uint32_t));
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;

		pld_smmu_map(soc->osdev->dev, ipa_res->rx_ready_doorbell_paddr,
			     &rx_ready_doorbell_dmaaddr, sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
	}

	hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_comp_doorbell_paddr,
		(void *)ipa_res->tx_comp_doorbell_vaddr);

	/*
	 * For RX, the REO module on Napier/Hastings reorders incoming
	 * Ethernet packets and writes one or more descriptors to the
	 * REO2IPA Rx ring. It then updates the ring's write/head pointer
	 * and rings a doorbell to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t *op_msg)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (pdev->ipa_uc_op_cb) {
		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		    "%s: IPA callback function is not registered", __func__);
		qdf_mem_free(op_msg);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 ipa_uc_op_cb_type op_cb,
				 void *usr_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	pdev->ipa_uc_op_cb = op_cb;
	pdev->usr_ctxt = usr_ctxt;

	return QDF_STATUS_SUCCESS;
}

void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return;
	}

	dp_debug("Deregister OP handler callback");
	pdev->ipa_uc_op_cb = NULL;
	pdev->usr_ctxt = NULL;
}

QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD */
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_send_ipa_data_frame() - send IPA data frame
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @skb: skb to transmit
 *
 * Return: NULL on success, the skb on failure
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				     qdf_nbuf_t skb)
{
	qdf_nbuf_t ret;

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = dp_tx_send(soc_hdl, vdev_id, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}

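/*
 * Editor's usage sketch (hypothetical, not part of this file): an IPA
 * w2i exception handler typically bounces the frame back to the WLAN
 * Tx path and frees it only if transmission fails, e.g.:
 *
 *	nbuf = dp_tx_send_ipa_data_frame(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */
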
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to remap REO rings to REO2IPA ring */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) |
		      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix2);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint32_t ix0;
	uint32_t ix2;
	uint32_t ix3;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		return QDF_STATUS_E_AGAIN;

	/* Call HAL API to restore the default (non-IPA) REO ring remap */
	ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) |
	      HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) |
	      HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) |
	      HAL_REO_REMAP_IX0(REO_REMAP_FW, 7);

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_reo_remap_config(soc, &ix2, &ix3);

		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   &ix2, &ix3);
		dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
	} else {
		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
					   NULL, NULL);
		dp_ipa_reo_remap_history_add(ix0, 0, 0);
	}

	return QDF_STATUS_SUCCESS;
}


/* L3 header padding; should be configurable per H/W configuration */
#define L3_HEADER_PADDING	2

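/*
 * Worked example (editor's illustration; the 384-byte TLV length is an
 * assumption for this target family): with RX_PKT_TLVS_LEN = 384, the
 * Rx pkt_offset programmed below becomes 384 + 2 = 386 bytes, so the IP
 * header following the 14-byte Ethernet header lands on a 4-byte
 * boundary (386 + 14 = 400).
 */
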
#ifdef CONFIG_IPA_WDI_UNIFIED_API

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	int i;

	/* Setup MCC sys pipe */
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
			DP_IPA_MAX_IFACE;
	for (i = 0; i < DP_IPA_MAX_IFACE; i++)
		memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i],
		       &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t));
}
#else
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
#endif

static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}

static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
					IPA_CLIENT_WLAN1_PROD;

	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}

static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi)
{
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;

	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	/* Preprogram TCL descriptor */
	desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(
			tx_smmu);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
		HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */
}

static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
					IPA_CLIENT_WLAN1_PROD;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;

	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}

QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	qdf_ipa_ep_cfg_t *tx_cfg;
	qdf_ipa_ep_cfg_t *rx_cfg;
	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
	qdf_ipa_wdi_conn_in_params_t pipe_in;
	qdf_ipa_wdi_conn_out_params_t pipe_out;
	int ret;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	if (is_smmu_enabled)
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
	else
		QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;

	dp_setup_mcc_sys_pipes(sys_in, &pipe_in);

	/* TX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
	} else {
		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
	}

	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;

	/*
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi);
	else
		dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);

	/* RX PIPE */
	if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
	} else {
		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
	}

	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;

	/*
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	if (is_smmu_enabled)
		dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi);
	else
		dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);

	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;

	/* Connect WDI IPA PIPEs */
	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	ipa_res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
	ipa_res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);

	ipa_res->is_db_ddr_mapped =
		QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);

	soc->ipa_first_tx_db_access = true;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_ipa_setup_iface() - Setup IPA header and register interface
 * @ifname: Interface name
 * @mac_addr: Interface MAC address
 * @prod_client: IPA prod client type
 * @cons_client: IPA cons client type
 * @session_id: Session ID
 * @is_ipv6_enabled: Is IPV6 enabled or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled)
{
	qdf_ipa_wdi_reg_intf_in_params_t in;
	qdf_ipa_wdi_hdr_info_t hdr_info;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
	int ret = -EINVAL;

	dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
		 QDF_MAC_ADDR_REF(mac_addr));
	qdf_mem_zero(&in, sizeof(in));	/* don't pass stack garbage in unset fields */
	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);

	/* IPV4 header */
	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);

	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
		htonl(session_id << 16);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);

	/* IPV6 header */
	if (is_ipv6_enabled) {
		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
			     DP_IPA_UC_WLAN_TX_HDR_LEN);
		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	}

	dp_debug("registering for session_id: %u", session_id);

	ret = qdf_ipa_wdi_reg_intf(&in);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		    "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
		    __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#else /* CONFIG_IPA_WDI_UNIFIED_API */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	qdf_ipa_wdi_pipe_setup_info_t *tx;
	qdf_ipa_wdi_pipe_setup_info_t *rx;
	qdf_ipa_wdi_conn_in_params_t pipe_in;
	qdf_ipa_wdi_conn_out_params_t pipe_out;
	struct tcl_data_cmd *tcl_desc_ptr;
	uint8_t *desc_addr;
	uint32_t desc_size;
	int ret;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/*
	 * Zeroing pipe_in also clears the embedded tx/rx setup-info
	 * structs; zeroing through the tx/rx pointers themselves would
	 * write sizeof(setup_info) bytes over a pointer-sized variable.
	 */
	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
	qdf_mem_zero(&pipe_out, sizeof(pipe_out));

	/* TX PIPE */
	/*
	 * Transfer Ring: WBM Ring
	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
	 * Event Ring: TCL ring
	 * Event Ring Doorbell PA: TCL Head Pointer Address
	 */
	tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
	QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
	QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		ipa_res->tx_comp_ring_base_paddr;
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		ipa_res->tx_comp_ring_size;
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		ipa_res->tx_ring_base_paddr;
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	/* Preprogram TCL descriptor */
	desc_addr =
		(uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
	desc_size = sizeof(struct tcl_data_cmd);
	HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
	tcl_desc_ptr = (struct tcl_data_cmd *)
		(QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
	tcl_desc_ptr->buf_addr_info.return_buffer_manager =
						HAL_RX_BUF_RBM_SW2_BM;
	tcl_desc_ptr->addrx_en = 1;	/* Address X search enable in ASE */
	tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
	tcl_desc_ptr->packet_offset = 2;	/* padding for alignment */

	/* RX PIPE */
	/*
	 * Transfer Ring: REO Ring
	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
	 * Event Ring: FW ring
	 * Event Ring Doorbell PA: FW Head Pointer Address
	 */
	rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
	QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
	QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
	QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
	QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
	QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
	QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
						ipa_res->rx_rdy_ring_base_paddr;
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
						ipa_res->rx_rdy_ring_size;
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
					soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
					ipa_res->rx_refill_ring_base_paddr;
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
						ipa_res->rx_refill_ring_size;
	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
				soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN +
						L3_HEADER_PADDING;
	QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
	QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;

	/* Connect WDI IPA PIPE */
	ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	/* IPA uC Doorbell registers */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
		  __func__,
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
		(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));

	ipa_res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
	ipa_res->tx_comp_doorbell_vaddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
	ipa_res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);

	soc->ipa_first_tx_db_access = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
		  __func__,
		  "transfer_ring_base_pa",
		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
		  "transfer_ring_size",
		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
		  "transfer_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
		  "event_ring_base_pa",
		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
		  "event_ring_size",
		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
		  "event_ring_doorbell_pa",
		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
		  "num_pkt_buffers",
		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
		  "tx_comp_doorbell_paddr",
		  (void *)ipa_res->tx_comp_doorbell_paddr);

1579 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1580 		  "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
1581 		  __func__,
1582 		  "transfer_ring_base_pa",
1583 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
1584 		  "transfer_ring_size",
1585 		  QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
1586 		  "transfer_ring_doorbell_pa",
1587 		  (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
1588 		  "event_ring_base_pa",
1589 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
1590 		  "event_ring_size",
1591 		  QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
1592 		  "event_ring_doorbell_pa",
1593 		  (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
1594 		  "num_pkt_buffers",
1595 		  QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
1596 		  "tx_comp_doorbell_paddr",
1597 		  (void *)ipa_res->rx_ready_doorbell_paddr);
1598 
1599 	return QDF_STATUS_SUCCESS;
1600 }
1601 
1602 /**
1603  * dp_ipa_setup_iface() - Setup IPA header and register interface
1604  * @ifname: Interface name
1605  * @mac_addr: Interface MAC address
1606  * @prod_client: IPA prod client type
1607  * @cons_client: IPA cons client type
1608  * @session_id: Session ID
1609  * @is_ipv6_enabled: Is IPV6 enabled or not
1610  *
1611  * Return: QDF_STATUS
1612  */
1613 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
1614 			      qdf_ipa_client_type_t prod_client,
1615 			      qdf_ipa_client_type_t cons_client,
1616 			      uint8_t session_id, bool is_ipv6_enabled)
1617 {
1618 	qdf_ipa_wdi_reg_intf_in_params_t in;
1619 	qdf_ipa_wdi_hdr_info_t hdr_info;
1620 	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
1621 	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
1622 	int ret = -EINVAL;
1623 
1624 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1625 		  "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
1626 		  __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
1627 
1628 	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	/* zero 'in' so unused fields (e.g. v6 hdr_info) are not stack garbage */
	qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));
1629 	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
1630 
1631 	/* IPV4 header */
1632 	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
1633 
1634 	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
1635 	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
1636 	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
1637 	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
1638 		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
1639 
1640 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
1641 	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
1642 		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1643 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
1644 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
1645 		htonl(session_id << 16);
1646 	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
1647 
1648 	/* IPV6 header */
1649 	if (is_ipv6_enabled) {
1650 		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
1651 			     DP_IPA_UC_WLAN_TX_HDR_LEN);
1652 		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
1653 		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
1654 		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
1655 			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
1656 	}
1657 
1658 	ret = qdf_ipa_wdi_reg_intf(&in);
1659 	if (ret) {
1660 		dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
1661 		       ret);
1662 		return QDF_STATUS_E_FAILURE;
1663 	}
1664 
1665 	return QDF_STATUS_SUCCESS;
1666 }
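
/*
 * Illustrative sketch (not part of the driver): how a hypothetical caller
 * might pair dp_ipa_setup_iface() with dp_ipa_cleanup_iface(). The
 * interface name, MAC address and consumer client below are made-up
 * placeholders; only the WLAN1 producer client appears in this file.
 */
#if 0
static QDF_STATUS example_register_iface_with_ipa(void)
{
	uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
	QDF_STATUS status;

	/* registers IPv4 and (since the last arg is true) IPv6 headers */
	status = dp_ipa_setup_iface("wlan0", mac,
				    IPA_CLIENT_WLAN1_PROD,
				    IPA_CLIENT_WLAN1_CONS,
				    1 /* session_id */, true);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* ... datapath runs; on teardown undo the registration ... */
	return dp_ipa_cleanup_iface("wlan0", true);
}
#endif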
1667 
1668 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
1669 
1670 /**
1671  * dp_ipa_cleanup() - Disconnect IPA pipes
1672  * @soc_hdl: dp soc handle
1673  * @pdev_id: dp pdev id
1674  * @tx_pipe_handle: Tx pipe handle
1675  * @rx_pipe_handle: Rx pipe handle
1676  *
1677  * Return: QDF_STATUS
1678  */
1679 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1680 			  uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
1681 {
1682 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1683 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1684 	struct dp_ipa_resources *ipa_res;
1685 	struct dp_pdev *pdev;
1686 	int ret;
1687 
1688 	ret = qdf_ipa_wdi_disconn_pipes();
1689 	if (ret) {
1690 		dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
1691 		       ret);
1692 		status = QDF_STATUS_E_FAILURE;
1693 	}
1694 
1695 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1696 	if (qdf_unlikely(!pdev)) {
1697 		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
1698 		status = QDF_STATUS_E_FAILURE;
1699 		goto exit;
1700 	}
1701 
1702 	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
1703 		ipa_res = &pdev->ipa_resource;
1704 
1705 		/* unmap must be done in the reverse order of smmu map */
1706 		ret = pld_smmu_unmap(soc->osdev->dev,
1707 				     ipa_res->rx_ready_doorbell_paddr,
1708 				     sizeof(uint32_t));
1709 		if (ret)
1710 			dp_err_rl("IPA RX DB smmu unmap failed");
1711 
1712 		ret = pld_smmu_unmap(soc->osdev->dev,
1713 				     ipa_res->tx_comp_doorbell_paddr,
1714 				     sizeof(uint32_t));
1715 		if (ret)
1716 			dp_err_rl("IPA TX DB smmu unmap failed");
1717 	}
1718 
1719 exit:
1720 	return status;
1721 }
1722 
1723 /**
1724  * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
1725  * @ifname: Interface name
1726  * @is_ipv6_enabled: Is IPV6 enabled or not
1727  *
1728  * Return: QDF_STATUS
1729  */
1730 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
1731 {
1732 	int ret;
1733 
1734 	ret = qdf_ipa_wdi_dereg_intf(ifname);
1735 	if (ret) {
1736 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1737 			  "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
1738 			  __func__, ret);
1739 		return QDF_STATUS_E_FAILURE;
1740 	}
1741 
1742 	return QDF_STATUS_SUCCESS;
1743 }
1744 
1745 QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1746 {
1747 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1748 	struct dp_pdev *pdev =
1749 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1750 	struct hal_srng *wbm_srng = (struct hal_srng *)
1751 			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
1752 	struct dp_ipa_resources *ipa_res;
1753 	QDF_STATUS result;
1754 
1755 	if (!pdev) {
1756 		dp_err("Invalid instance");
1757 		return QDF_STATUS_E_FAILURE;
1758 	}
1759 
1760 	ipa_res = &pdev->ipa_resource;
1761 
1762 	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
1763 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true);
1764 
1765 	result = qdf_ipa_wdi_enable_pipes();
1766 	if (result) {
1767 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1768 			  "%s: Enable WDI PIPE fail, code %d",
1769 			  __func__, result);
1770 		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1771 		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1772 		return QDF_STATUS_E_FAILURE;
1773 	}
1774 
1775 	if (soc->ipa_first_tx_db_access) {
1776 		hal_srng_dst_init_hp(
1777 			soc->hal_soc, wbm_srng,
1778 			ipa_res->tx_comp_doorbell_vaddr);
1779 		soc->ipa_first_tx_db_access = false;
1780 	}
1781 
1782 	return QDF_STATUS_SUCCESS;
1783 }
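
/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 * enable the WDI pipes, let offloaded traffic flow, then disable them
 * with dp_ipa_disable_pipes() on teardown.
 */
#if 0
static void example_ipa_pipe_cycle(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	if (dp_ipa_enable_pipes(soc_hdl, pdev_id) != QDF_STATUS_SUCCESS)
		return;

	/* ... IPA-offloaded traffic flows here ... */

	dp_ipa_disable_pipes(soc_hdl, pdev_id);
}
#endif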
1784 
1785 #ifdef DEVICE_FORCE_WAKE_ENABLED
1786 /**
1787  * dp_ipa_get_tx_comp_pending_check() - Check if tx completions are pending.
1788  * @soc: DP SoC context
1789  *
1790  * The ring occupancy is checked to find out whether buffers are still
1791  * left for processing, since the host alone allocates buffers into this
1792  * ring and IPA HW processes them.
1793  *
1794  * Return: True if tx completions are pending
1795  */
1796 static bool dp_ipa_get_tx_comp_pending_check(struct dp_soc *soc)
1797 {
1798 	struct dp_srng *tx_comp_ring =
1799 				&soc->tx_comp_ring[IPA_TX_COMP_RING_IDX];
1800 	uint32_t hp, tp, entry_size, buf_cnt;
1801 
1802 	hal_get_hw_hptp(soc->hal_soc, tx_comp_ring->hal_srng, &hp, &tp,
1803 			WBM2SW_RELEASE);
1804 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM2SW_RELEASE) >> 2;
1805 
1806 	if (hp > tp)
1807 		buf_cnt = (hp - tp) / entry_size;
1808 	else
1809 		buf_cnt = (tx_comp_ring->num_entries - tp + hp) / entry_size;
1810 
1811 	return (soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt != buf_cnt);
1812 }
1813 #endif
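
/*
 * The occupancy check above is plain circular-buffer math. A minimal
 * generic sketch, assuming head and tail are expressed in the same units
 * as the ring size (illustrative helper, not a driver API):
 */
#if 0
static uint32_t example_ring_occupancy(uint32_t hp, uint32_t tp,
				       uint32_t ring_size)
{
	/* head at or past tail: plain distance; otherwise wrap around */
	return (hp >= tp) ? (hp - tp) : (ring_size - tp + hp);
}
#endif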
1814 
1815 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1816 {
1817 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1818 	struct dp_pdev *pdev =
1819 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1820 	int timeout = TX_COMP_DRAIN_WAIT_TIMEOUT_MS;
1821 	QDF_STATUS result;
1822 
1823 	if (!pdev) {
1824 		dp_err("Invalid instance");
1825 		return QDF_STATUS_E_FAILURE;
1826 	}
1827 
1828 	/*
1829 	 * The tx completions pending check will trigger register read
1830 	 * for HP and TP of wbm2sw2 ring. There is a possibility for
1831 	 * these reg read to cause a NOC error if UMAC is in low power
1832 	 * state. The WAR is to sleep for the drain timeout without checking
1833 	 * for the pending tx completions. This WAR can be replaced with
1834 	 * poll logic for HP/TP difference once force wake is in place.
1835 	 */
1836 #ifdef DEVICE_FORCE_WAKE_ENABLED
1837 	while (dp_ipa_get_tx_comp_pending_check(soc)) {
1838 		qdf_sleep(TX_COMP_DRAIN_WAIT_MS);
1839 		timeout -= TX_COMP_DRAIN_WAIT_MS;
1840 		if (timeout <= 0) {
1841 			dp_err("Tx completions pending, force disabling pipes");
1842 			break;
1843 		}
1844 	}
1845 #else
1846 	qdf_sleep(timeout);
1847 #endif
1848 
1849 	result = qdf_ipa_wdi_disable_pipes();
1850 	if (result) {
1851 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1852 			  "%s: Disable WDI PIPE fail, code %d",
1853 			  __func__, result);
1854 		qdf_assert_always(0);
1855 		return QDF_STATUS_E_FAILURE;
1856 	}
1857 
1858 	qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
1859 	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false);
1860 
1861 	return QDF_STATUS_SUCCESS;
1862 }
1863 
1864 /**
1865  * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
1866  * @client: Client type
1867  * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
1868  *
1869  * Return: QDF_STATUS
1870  */
1871 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
1872 {
1873 	qdf_ipa_wdi_perf_profile_t profile;
1874 	QDF_STATUS result;
1875 
1876 	profile.client = client;
1877 	profile.max_supported_bw_mbps = max_supported_bw_mbps;
1878 
1879 	result = qdf_ipa_wdi_set_perf_profile(&profile);
1880 	if (result) {
1881 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1882 			  "%s: ipa_wdi_set_perf_profile fail, code %d",
1883 			  __func__, result);
1884 		return QDF_STATUS_E_FAILURE;
1885 	}
1886 
1887 	return QDF_STATUS_SUCCESS;
1888 }
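
/*
 * Illustrative call (made-up numbers): vote IPA clocks for roughly
 * 800 Mbps on the WLAN consumer client. The consumer client enum is
 * assumed; any qdf_ipa_client_type_t value the platform defines works.
 */
#if 0
static void example_vote_ipa_bandwidth(void)
{
	(void)dp_ipa_set_perf_level(IPA_CLIENT_WLAN1_CONS, 800);
}
#endif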
1889 
1890 /**
1891  * dp_ipa_intrabss_send - send IPA RX intra-bss frames
1892  * @pdev: pdev
1893  * @vdev: vdev
1894  * @nbuf: skb
1895  *
1896  * Return: nbuf if TX fails and NULL if TX succeeds
1897  */
1898 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
1899 				       struct dp_vdev *vdev,
1900 				       qdf_nbuf_t nbuf)
1901 {
1902 	struct dp_peer *vdev_peer;
1903 	uint16_t len;
1904 
1905 	vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
1906 	if (qdf_unlikely(!vdev_peer))
1907 		return nbuf;
1908 
1909 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1910 	len = qdf_nbuf_len(nbuf);
1911 
1912 	if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
1913 		DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
1914 		dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
1915 		return nbuf;
1916 	}
1917 
1918 	DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len);
1919 	dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
1920 	return NULL;
1921 }
1922 
1923 bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1924 			    qdf_nbuf_t nbuf, bool *fwd_success)
1925 {
1926 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1927 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1928 						     DP_MOD_ID_IPA);
1929 	struct dp_pdev *pdev;
1930 	struct dp_peer *da_peer;
1931 	struct dp_peer *sa_peer;
1932 	qdf_nbuf_t nbuf_copy;
1933 	uint8_t da_is_bcmc;
1934 	struct ethhdr *eh;
1935 	bool status = false;
1936 
1937 	*fwd_success = false; /* set default as failure */
1938 
1939 	/*
1940 	 * WDI 3.0 skb->cb[] info from IPA driver
1941 	 * skb->cb[0] = vdev_id
1942 	 * skb->cb[1].bit#1 = da_is_bcmc
1943 	 */
1944 	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
1945 
1946 	if (qdf_unlikely(!vdev))
1947 		return false;
1948 
1949 	pdev = vdev->pdev;
1950 	if (qdf_unlikely(!pdev))
1951 		goto out;
1952 
1953 	/* no forwarding in station mode, just pass up to stack */
1954 	if (vdev->opmode == wlan_op_mode_sta)
1955 		goto out;
1956 
1957 	if (da_is_bcmc) {
1958 		nbuf_copy = qdf_nbuf_copy(nbuf);
1959 		if (!nbuf_copy)
1960 			goto out;
1961 
1962 		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
1963 			qdf_nbuf_free(nbuf_copy);
1964 		else
1965 			*fwd_success = true;
1966 
1967 		/* return false to pass original pkt up to stack */
1968 		goto out;
1969 	}
1970 
1971 	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
1972 
1973 	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
1974 		goto out;
1975 
1976 	da_peer = dp_peer_find_hash_find(soc, eh->h_dest, 0, vdev->vdev_id,
1977 					 DP_MOD_ID_IPA);
1978 	if (!da_peer)
1979 		goto out;
1980 
1981 	dp_peer_unref_delete(da_peer, DP_MOD_ID_IPA);
1982 
1983 	sa_peer = dp_peer_find_hash_find(soc, eh->h_source, 0, vdev->vdev_id,
1984 					 DP_MOD_ID_IPA);
1985 	if (!sa_peer)
1986 		goto out;
1987 
1988 	dp_peer_unref_delete(sa_peer, DP_MOD_ID_IPA);
1989 
1990 	/*
1991 	 * In the intra-bss forwarding scenario the skb is allocated by the
1992 	 * IPA driver. Add it to the internal tracking table so the nbuf
1993 	 * memory-leak check does not flag it as an unallocated skb.
1994 	 */
1995 	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);
1996 
1997 	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
1998 		qdf_nbuf_free(nbuf);
1999 	else
2000 		*fwd_success = true;
2001 
2002 	status = true;
2003 out:
2004 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
2005 	return status;
2006 }
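
/*
 * Minimal sketch of the WDI 3.0 cb[] decoding used above, relying only
 * on the layout documented in dp_ipa_rx_intrabss_fwd() (illustrative
 * helper, not a driver API):
 */
#if 0
static void example_decode_wdi3_cb(qdf_nbuf_t nbuf, uint8_t *vdev_id,
				   bool *da_is_bcmc)
{
	*vdev_id = (uint8_t)nbuf->cb[0];
	*da_is_bcmc = !!(((uint8_t)nbuf->cb[1]) & 0x2);
}
#endif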
2007 
2008 #ifdef MDM_PLATFORM
2009 bool dp_ipa_is_mdm_platform(void)
2010 {
2011 	return true;
2012 }
2013 #else
2014 bool dp_ipa_is_mdm_platform(void)
2015 {
2016 	return false;
2017 }
2018 #endif
2019 
2020 /**
2021  * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA
2022  * @soc: soc
2023  * @nbuf: source skb
2024  *
2025  * Return: new nbuf on success, otherwise NULL
2026  */
2027 static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
2028 					     qdf_nbuf_t nbuf)
2029 {
2030 	uint8_t *src_nbuf_data;
2031 	uint8_t *dst_nbuf_data;
2032 	qdf_nbuf_t dst_nbuf;
2033 	qdf_nbuf_t temp_nbuf = nbuf;
2034 	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
2035 	bool is_nbuf_head = true;
2036 	uint32_t copy_len = 0;
2037 
2038 	dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE,
2039 				  RX_BUFFER_RESERVATION,
2040 				  RX_DATA_BUFFER_ALIGNMENT, FALSE);
2041 
2042 	if (!dst_nbuf) {
2043 		dp_err_rl("nbuf allocation failed");
2044 		return NULL;
2045 	}
2046 
2047 	if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) {
2048 		qdf_nbuf_free(dst_nbuf);
2049 		dp_err_rl("jumbo nbuf len %u cannot fit into RX buffer", nbuf_len);
2050 		return NULL;
2051 	}
2052 
2053 	/* prepare to copy all data into new skb */
2054 	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
2055 	while (temp_nbuf) {
2056 		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
2057 		/* first head nbuf */
2058 		if (is_nbuf_head) {
2059 			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
2060 				     RX_PKT_TLVS_LEN);
2061 			/* leave extra 2 bytes L3_HEADER_PADDING */
2062 			dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING);
2063 			src_nbuf_data += RX_PKT_TLVS_LEN;
2064 			copy_len = qdf_nbuf_headlen(temp_nbuf) -
2065 						RX_PKT_TLVS_LEN;
2066 			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
2067 			is_nbuf_head = false;
2068 		} else {
2069 			copy_len = qdf_nbuf_len(temp_nbuf);
2070 			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
2071 		}
2072 		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
2073 		dst_nbuf_data += copy_len;
2074 	}
2075 
2076 	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
2077 	/* copy is done, free original nbuf */
2078 	qdf_nbuf_free(nbuf);
2079 
2080 	return dst_nbuf;
2081 }
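
/*
 * The copy loop above, reduced to its generic form: flatten a chain of
 * buffers into one contiguous destination. A sketch with plain C types
 * (the driver version additionally re-seats RX_PKT_TLVS_LEN bytes of
 * TLVs and inserts L3_HEADER_PADDING):
 */
#if 0
struct example_buf {
	uint8_t *data;
	uint32_t len;
	struct example_buf *next;
};

static uint32_t example_flatten(struct example_buf *src, uint8_t *dst,
				uint32_t dst_size)
{
	uint32_t off = 0;

	for (; src; src = src->next) {
		if (off + src->len > dst_size)
			return 0;	/* would overflow the destination */
		qdf_mem_copy(dst + off, src->data, src->len);
		off += src->len;
	}

	return off;
}
#endif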
2082 
2083 /**
2084  * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
2085  * @soc: soc
2086  * @nbuf: skb
2087  *
2088  * Return: nbuf on success, otherwise NULL
2089  */
2090 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
2091 {
2092 
2093 	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2094 		return nbuf;
2095 
2096 	/* WLAN IPA is run-time disabled */
2097 	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
2098 		return nbuf;
2099 
2100 	if (!qdf_nbuf_is_frag(nbuf))
2101 		return nbuf;
2102 
2103 	/* linearize skb for IPA */
2104 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
2105 }
2106 
2107 QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
2108 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2109 {
2110 	QDF_STATUS ret;
2111 
2112 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2113 	struct dp_pdev *pdev =
2114 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2115 
2116 	if (!pdev) {
2117 		dp_err("Invalid instance");
2118 		return QDF_STATUS_E_FAILURE;
2119 	}
2120 
2121 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
2122 		dp_debug("SMMU S1 disabled");
2123 		return QDF_STATUS_SUCCESS;
2124 	}
2125 	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
2126 
2127 	return ret;
2128 }
2129 
2130 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
2131 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2132 {
2133 	QDF_STATUS ret;
2134 
2135 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2136 	struct dp_pdev *pdev =
2137 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2138 
2139 	if (!pdev) {
2140 		dp_err("Invalid instance");
2141 		return QDF_STATUS_E_FAILURE;
2142 	}
2143 
2144 	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
2145 		dp_debug("SMMU S1 disabled");
2146 		return QDF_STATUS_SUCCESS;
2147 	}
2148 	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, false);
2149 
2150 	return ret;
2151 }
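
/*
 * Illustrative pairing for the two helpers above (hypothetical caller):
 * map tx buffers into the SMMU before IPA takes ownership, and unmap
 * them on the reverse path.
 */
#if 0
static QDF_STATUS example_tx_buf_smmu_cycle(struct cdp_soc_t *soc_hdl,
					    uint8_t pdev_id)
{
	QDF_STATUS status;

	status = dp_ipa_tx_buf_smmu_mapping(soc_hdl, pdev_id);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* ... buffers are IPA-owned here ... */

	return dp_ipa_tx_buf_smmu_unmapping(soc_hdl, pdev_id);
}
#endif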
2152 
2153 #endif
2154