xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.c (revision c62ef80144c85f126f4dd52e661336fccb8b56cf)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_utility.h>
21 #include <dp_internal.h>
22 #include "dp_rings.h"
23 #include <dp_htt.h>
24 #include "dp_be.h"
25 #include "dp_be_tx.h"
26 #include "dp_be_rx.h"
27 #ifdef WIFI_MONITOR_SUPPORT
28 #if !defined(DISABLE_MON_CONFIG) && (defined(WLAN_PKT_CAPTURE_TX_2_0) || \
29 	defined(WLAN_PKT_CAPTURE_RX_2_0))
30 #include "dp_mon_2.0.h"
31 #endif
32 #include "dp_mon.h"
33 #endif
34 #include <hal_be_api.h>
35 #ifdef WLAN_SUPPORT_PPEDS
36 #include "be/dp_ppeds.h"
37 #include <ppe_vp_public.h>
38 #include <ppe_drv_sc.h>
39 #endif
40 
41 #ifdef WLAN_SUPPORT_PPEDS
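/*
 * Human-readable labels for the RING_USAGE_* utilization buckets,
 * printed per-bucket by dp_ppeds_rings_stats() below.
 */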
42 static const char *ring_usage_dump[RING_USAGE_MAX] = {
43 	"100%",
44 	"Greater than 90%",
45 	"70 to 90%",
46 	"50 to 70%",
47 	"Less than 50%"
48 };
49 #endif
50 
51 /* Generic AST entry aging timer value */
52 #define DP_AST_AGING_TIMER_DEFAULT_MS	5000
53 
54 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
55 #define DP_TX_VDEV_ID_CHECK_ENABLE 0
56 
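/*
 * TCL data ring to WBM completion ring mapping for single-pdev targets.
 * The WBM ring numbers are non-contiguous: wbm2sw1 is reserved for FW
 * (see dp_cc_wbm_sw_en_cfg() below), and the skipped rings are
 * presumably owned by other clients such as IPA.
 */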
57 static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
58 	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
59 	{1, 4, HAL_BE_WBM_SW4_BM_ID, 0},
60 	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
61 #ifdef QCA_WIFI_KIWI_V2
62 	{3, 5, HAL_BE_WBM_SW5_BM_ID, 0},
63 	{4, 6, HAL_BE_WBM_SW6_BM_ID, 0}
64 #else
65 	{3, 6, HAL_BE_WBM_SW5_BM_ID, 0},
66 	{4, 7, HAL_BE_WBM_SW6_BM_ID, 0}
67 #endif
68 };
69 #else
70 #define DP_TX_VDEV_ID_CHECK_ENABLE 1
71 
72 static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
73 	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
74 	{1, 1, HAL_BE_WBM_SW1_BM_ID, 0},
75 	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
76 	{3, 3, HAL_BE_WBM_SW3_BM_ID, 0},
77 	{4, 4, HAL_BE_WBM_SW4_BM_ID, 0}
78 };
79 #endif
80 
81 #ifdef WLAN_SUPPORT_PPEDS
82 static struct cdp_ppeds_txrx_ops dp_ops_ppeds_be = {
83 	.ppeds_entry_attach = dp_ppeds_attach_vdev_be,
84 	.ppeds_entry_detach = dp_ppeds_detach_vdev_be,
85 	.ppeds_set_int_pri2tid = dp_ppeds_set_int_pri2tid_be,
86 	.ppeds_update_int_pri2tid = dp_ppeds_update_int_pri2tid_be,
87 	.ppeds_entry_dump = dp_ppeds_dump_ppe_vp_tbl_be,
88 	.ppeds_enable_pri2tid = dp_ppeds_vdev_enable_pri2tid_be,
89 	.ppeds_vp_setup_recovery = dp_ppeds_vp_setup_on_fw_recovery,
90 	.ppeds_stats_sync = dp_ppeds_stats_sync_be,
91 };
92 
93 static void dp_ppeds_rings_status(struct dp_soc *soc)
94 {
95 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
96 
97 	dp_print_ring_stat_from_hal(soc, &be_soc->reo2ppe_ring, REO2PPE);
98 	dp_print_ring_stat_from_hal(soc, &be_soc->ppe2tcl_ring, PPE2TCL);
99 	dp_print_ring_stat_from_hal(soc, &be_soc->ppeds_wbm_release_ring,
100 				    WBM2SW_RELEASE);
101 }
102 
103 static void dp_ppeds_inuse_desc(struct dp_soc *soc)
104 {
105 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
106 
107 	DP_PRINT_STATS("PPE-DS Tx descriptors in use = %u, num_free = %u",
108 		       be_soc->ppeds_tx_desc.num_allocated,
109 		       be_soc->ppeds_tx_desc.num_free);
110 
111 	DP_PRINT_STATS("PPE-DS Tx desc alloc failed %u",
112 		       be_soc->ppeds_stats.tx.desc_alloc_failed);
113 }
114 
115 static void dp_ppeds_clear_stats(struct dp_soc *soc)
116 {
117 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
118 
119 	be_soc->ppeds_stats.tx.desc_alloc_failed = 0;
120 }
121 
122 static void dp_ppeds_rings_stats(struct dp_soc *soc)
123 {
124 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
125 	int i = 0;
126 
127 	DP_PRINT_STATS("Ring utilization statistics");
128 	DP_PRINT_STATS("WBM2SW_RELEASE");
129 
130 	for (i = 0; i < RING_USAGE_MAX; i++)
131 		DP_PRINT_STATS("\t %s utilized %d instances",
132 			       ring_usage_dump[i],
133 			       be_soc->ppeds_wbm_release_ring.stats.util[i]);
134 
135 	DP_PRINT_STATS("PPE2TCL");
136 
137 	for (i = 0; i < RING_USAGE_MAX; i++)
138 		DP_PRINT_STATS("\t %s utilized %d instances",
139 			       ring_usage_dump[i],
140 			       be_soc->ppe2tcl_ring.stats.util[i]);
141 }
142 
143 static void dp_ppeds_clear_rings_stats(struct dp_soc *soc)
144 {
145 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
146 
147 	memset(&be_soc->ppeds_wbm_release_ring.stats, 0,
148 	       sizeof(struct ring_util_stats));
149 	memset(&be_soc->ppe2tcl_ring.stats, 0, sizeof(struct ring_util_stats));
150 }
151 #endif
152 
153 static void dp_soc_cfg_attach_be(struct dp_soc *soc)
154 {
155 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
156 	dp_soc_cfg_attach(soc);
157 
158 	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
159 
160 	soc->wlan_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
161 
162 	/* this is used only when dmac mode is enabled */
163 	soc->num_rx_refill_buf_rings = 1;
164 
165 	soc->wlan_cfg_ctx->notify_frame_support =
166 				DP_MARK_NOTIFY_FRAME_SUPPORT;
167 }
168 
169 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
170 {
171 	switch (context_type) {
172 	case DP_CONTEXT_TYPE_SOC:
173 		return sizeof(struct dp_soc_be);
174 	case DP_CONTEXT_TYPE_PDEV:
175 		return sizeof(struct dp_pdev_be);
176 	case DP_CONTEXT_TYPE_VDEV:
177 		return sizeof(struct dp_vdev_be);
178 	case DP_CONTEXT_TYPE_PEER:
179 		return sizeof(struct dp_peer_be);
180 	default:
181 		return 0;
182 	}
183 }
184 
185 #if defined(DP_FEATURE_HW_COOKIE_CONVERSION) || defined(WLAN_SUPPORT_RX_FISA)
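/*
 * dp_get_cmem_chunk() below is a simple bump allocator: chunks are
 * carved sequentially from cmem_base and there is no corresponding
 * free here, so a reservation lasts for the lifetime of the SOC.
 */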
186 static uint64_t dp_get_cmem_chunk(struct dp_soc *soc, uint64_t size,
187 				  enum CMEM_MEM_CLIENTS client)
188 {
189 	uint64_t cmem_chunk;
190 
191 	dp_info("cmem base 0x%llx, total size 0x%llx avail_size 0x%llx",
192 		soc->cmem_base, soc->cmem_total_size, soc->cmem_avail_size);
193 
194 	/* Check if requested cmem space is available */
195 	if (soc->cmem_avail_size < size) {
196 		dp_err("cmem_size 0x%llx bytes < requested size 0x%llx bytes",
197 		       soc->cmem_avail_size, size);
198 		return 0;
199 	}
200 
201 	cmem_chunk = soc->cmem_base +
202 		     (soc->cmem_total_size - soc->cmem_avail_size);
203 	soc->cmem_avail_size -= size;
204 	dp_info("Reserved cmem space 0x%llx, size 0x%llx for client %d",
205 		cmem_chunk, size, client);
206 
207 	return cmem_chunk;
208 }
209 #endif
210 
211 #ifdef WLAN_SUPPORT_RX_FISA
212 static uint64_t dp_get_fst_cmem_base_be(struct dp_soc *soc, uint64_t size)
213 {
214 	return dp_get_cmem_chunk(soc, size, FISA_FST);
215 }
216 
217 static void dp_initialize_arch_ops_be_fisa(struct dp_arch_ops *arch_ops)
218 {
219 	arch_ops->dp_get_fst_cmem_base = dp_get_fst_cmem_base_be;
220 }
221 #else
222 static void dp_initialize_arch_ops_be_fisa(struct dp_arch_ops *arch_ops)
223 {
224 }
225 #endif
226 
227 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
228 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
229 /**
230  * dp_cc_wbm_sw_en_cfg() - configure HW cookie conversion enablement
231  *			   per wbm2sw ring
232  *
233  * @cc_cfg: HAL HW cookie conversion configuration structure pointer
234  *
235  * Return: None
236  */
237 #ifdef IPA_OPT_WIFI_DP
238 static inline
239 void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
240 {
241 	cc_cfg->wbm2sw6_cc_en = 1;
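	/*
	 * wbm2sw5 cookie conversion stays disabled in this variant,
	 * presumably because IPA owns that ring when IPA_OPT_WIFI_DP
	 * is enabled.
	 */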
242 	cc_cfg->wbm2sw5_cc_en = 0;
243 	cc_cfg->wbm2sw4_cc_en = 1;
244 	cc_cfg->wbm2sw3_cc_en = 1;
245 	cc_cfg->wbm2sw2_cc_en = 1;
246 	/* disable wbm2sw1 hw cc as it's for FW */
247 	cc_cfg->wbm2sw1_cc_en = 0;
248 	cc_cfg->wbm2sw0_cc_en = 1;
249 	cc_cfg->wbm2fw_cc_en = 0;
250 }
251 #else
252 static inline
253 void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
254 {
255 	cc_cfg->wbm2sw6_cc_en = 1;
256 	cc_cfg->wbm2sw5_cc_en = 1;
257 	cc_cfg->wbm2sw4_cc_en = 1;
258 	cc_cfg->wbm2sw3_cc_en = 1;
259 	cc_cfg->wbm2sw2_cc_en = 1;
260 	/* disable wbm2sw1 hw cc as it's for FW */
261 	cc_cfg->wbm2sw1_cc_en = 0;
262 	cc_cfg->wbm2sw0_cc_en = 1;
263 	cc_cfg->wbm2fw_cc_en = 0;
264 }
265 #endif
266 #else
267 static inline
268 void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
269 {
270 	cc_cfg->wbm2sw6_cc_en = 1;
271 	cc_cfg->wbm2sw5_cc_en = 1;
272 	cc_cfg->wbm2sw4_cc_en = 1;
273 	cc_cfg->wbm2sw3_cc_en = 1;
274 	cc_cfg->wbm2sw2_cc_en = 1;
275 	cc_cfg->wbm2sw1_cc_en = 1;
276 	cc_cfg->wbm2sw0_cc_en = 1;
277 	cc_cfg->wbm2fw_cc_en = 0;
278 }
279 #endif
280 
281 /**
282  * dp_cc_reg_cfg_init() - initialize and configure HW cookie
283  *			  conversion register
284  *
285  * @soc: SOC handle
286  * @is_4k_align: page address 4k aligned
287  *
288  * Return: None
289  */
290 static void dp_cc_reg_cfg_init(struct dp_soc *soc,
291 			       bool is_4k_align)
292 {
293 	struct hal_hw_cc_config cc_cfg = { 0 };
294 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
295 
296 	if (soc->cdp_soc.ol_ops->get_con_mode &&
297 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
298 		return;
299 
300 	if (!soc->wlan_cfg_ctx->hw_cc_enabled) {
301 		dp_info("INI skip HW CC register setting");
302 		return;
303 	}
304 
305 	cc_cfg.lut_base_addr_31_0 = be_soc->cc_cmem_base;
306 	cc_cfg.cc_global_en = true;
307 	cc_cfg.page_4k_align = is_4k_align;
308 	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
309 	cc_cfg.cookie_page_msb = DP_CC_DESC_ID_PPT_PAGE_OS_MSB;
310 	/* Bit 36 must be set so that HW knows this is a CMEM address */
311 	cc_cfg.lut_base_addr_39_32 = 0x10;
312 
313 	cc_cfg.error_path_cookie_conv_en = true;
314 	cc_cfg.release_path_cookie_conv_en = true;
315 	dp_cc_wbm_sw_en_cfg(&cc_cfg);
316 
317 	hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
318 }
319 
320 /**
321  * dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
322  * @hal_soc_hdl: HAL SOC handle
323  * @offset: CMEM address
324  * @value: value to write
325  *
326  * Return: None.
327  */
328 static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
329 				       uint32_t offset,
330 				       uint32_t value)
331 {
332 	hal_cmem_write(hal_soc_hdl, offset, value);
333 }
334 
335 /**
336  * dp_hw_cc_cmem_addr_init() - Check and initialize CMEM base address for
337  *			       HW cookie conversion
338  *
339  * @soc: SOC handle
340  *
341  * Return: 0 in case of success, else error value
342  */
343 static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
344 {
345 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
346 
347 	be_soc->cc_cmem_base = dp_get_cmem_chunk(soc, DP_CC_PPT_MEM_SIZE,
348 					      COOKIE_CONVERSION);
349 	return QDF_STATUS_SUCCESS;
350 }
351 
352 #else
353 
354 static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
355 				      bool is_4k_align) {}
356 
357 static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
358 				       uint32_t offset,
359 				       uint32_t value)
360 { }
361 
362 static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
363 {
364 	return QDF_STATUS_SUCCESS;
365 }
366 #endif
367 
368 #if defined(DP_FEATURE_HW_COOKIE_CONVERSION) || defined(WLAN_SUPPORT_RX_FISA)
369 static QDF_STATUS dp_get_cmem_allocation(struct dp_soc *soc,
370 					 uint8_t for_feature)
371 {
372 	QDF_STATUS status = QDF_STATUS_E_NOMEM;
373 
374 	switch (for_feature) {
375 	case COOKIE_CONVERSION:
376 		status = dp_hw_cc_cmem_addr_init(soc);
377 		break;
378 	default:
379 		dp_err("Invalid CMEM request");
380 	}
381 
382 	return status;
383 }
384 #else
385 static QDF_STATUS dp_get_cmem_allocation(struct dp_soc *soc,
386 					 uint8_t for_feature)
387 {
388 	return QDF_STATUS_SUCCESS;
389 }
390 #endif
391 
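/*
 * HW cookie conversion uses a two-level lookup: a Primary Page Table
 * (PPT) in CMEM holds the physical addresses of Secondary Page Table
 * (SPT) pages in DDR, and each SPT entry resolves a SW cookie to a
 * descriptor virtual address. The attach routine below only allocates
 * the SPT pages and their SW bookkeeping; the PPT entries are written
 * to CMEM later, in dp_hw_cookie_conversion_init().
 */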
392 QDF_STATUS
393 dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
394 			       struct dp_hw_cookie_conversion_t *cc_ctx,
395 			       uint32_t num_descs,
396 			       enum qdf_dp_desc_type desc_type,
397 			       uint8_t desc_pool_id)
398 {
399 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
400 	uint32_t num_spt_pages, i = 0;
401 	struct dp_spt_page_desc *spt_desc;
402 	struct qdf_mem_dma_page_t *dma_page;
403 	uint8_t chip_id;
404 
405 	/* estimate how many SPT DDR pages needed */
406 	num_spt_pages = qdf_do_div(
407 				num_descs + (DP_CC_SPT_PAGE_MAX_ENTRIES - 1),
408 				DP_CC_SPT_PAGE_MAX_ENTRIES);
409 	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
410 					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
411 	dp_info("num_spt_pages needed %d", num_spt_pages);
412 
413 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_CC_SPT_PAGE_TYPE,
414 				      &cc_ctx->page_pool, qdf_page_size,
415 				      num_spt_pages, 0, false);
416 	if (!cc_ctx->page_pool.dma_pages) {
417 		dp_err("spt ddr pages allocation failed");
418 		return QDF_STATUS_E_RESOURCES;
419 	}
420 	cc_ctx->page_desc_base = qdf_mem_malloc(
421 			num_spt_pages * sizeof(struct dp_spt_page_desc));
422 	if (!cc_ctx->page_desc_base) {
423 		dp_err("spt page descs allocation failed");
424 		goto fail_0;
425 	}
426 
427 	chip_id = dp_mlo_get_chip_id(soc);
428 	cc_ctx->cmem_offset = dp_desc_pool_get_cmem_base(chip_id, desc_pool_id,
429 							 desc_type);
430 
431 	/* initial page desc */
432 	spt_desc = cc_ctx->page_desc_base;
433 	dma_page = cc_ctx->page_pool.dma_pages;
434 	while (i < num_spt_pages) {
435 		/* check if page address 4K aligned */
436 		if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
437 			dp_err("non-4K aligned page addr %pK",
438 			       (void *)dma_page[i].page_p_addr);
439 			goto fail_1;
440 		}
441 
442 		spt_desc[i].page_v_addr =
443 					dma_page[i].page_v_addr_start;
444 		spt_desc[i].page_p_addr =
445 					dma_page[i].page_p_addr;
446 		i++;
447 	}
448 
449 	cc_ctx->total_page_num = num_spt_pages;
450 	qdf_spinlock_create(&cc_ctx->cc_lock);
451 
452 	return QDF_STATUS_SUCCESS;
453 fail_1:
454 	qdf_mem_free(cc_ctx->page_desc_base);
455 	cc_ctx->page_desc_base = NULL;
456 fail_0:
457 	dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_CC_SPT_PAGE_TYPE,
458 				     &cc_ctx->page_pool, 0, false);
459 
460 	return QDF_STATUS_E_FAILURE;
461 }
462 
463 QDF_STATUS
464 dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
465 			       struct dp_hw_cookie_conversion_t *cc_ctx)
466 {
467 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
468 
469 	dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_CC_SPT_PAGE_TYPE,
470 				     &cc_ctx->page_pool, 0, false);
471 	if (cc_ctx->page_desc_base)
472 		qdf_spinlock_destroy(&cc_ctx->cc_lock);
473 
474 	qdf_mem_free(cc_ctx->page_desc_base);
475 	cc_ctx->page_desc_base = NULL;
476 
477 	return QDF_STATUS_SUCCESS;
478 }
479 
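/*
 * Populate the PPT in CMEM: one entry per SPT page, holding the page
 * PA with the 4K-alignment bits stripped, and record each ppt_index
 * so that deinit can clear the same entries.
 */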
480 QDF_STATUS
481 dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
482 			     struct dp_hw_cookie_conversion_t *cc_ctx)
483 {
484 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
485 	uint32_t i = 0;
486 	struct dp_spt_page_desc *spt_desc;
487 	uint32_t ppt_index;
488 	uint32_t ppt_id_start;
489 
490 	if (!cc_ctx->total_page_num) {
491 		dp_err("total page num is 0");
492 		return QDF_STATUS_E_INVAL;
493 	}
494 
495 	ppt_id_start = DP_CMEM_OFFSET_TO_PPT_ID(cc_ctx->cmem_offset);
496 	spt_desc = cc_ctx->page_desc_base;
497 	while (i < cc_ctx->total_page_num) {
498 		/* write page PA to CMEM */
499 		dp_hw_cc_cmem_write(soc->hal_soc,
500 				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
501 				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
502 				    (spt_desc[i].page_p_addr >>
503 				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));
504 
505 		ppt_index = ppt_id_start + i;
506 
507 		if (ppt_index >= DP_CC_PPT_MAX_ENTRIES)
508 			qdf_assert_always(0);
509 
510 		spt_desc[i].ppt_index = ppt_index;
511 
512 		be_soc->page_desc_base[ppt_index].page_v_addr =
513 				spt_desc[i].page_v_addr;
514 		i++;
515 	}
516 	return QDF_STATUS_SUCCESS;
517 }
518 
519 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
520 QDF_STATUS
521 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
522 			       struct dp_hw_cookie_conversion_t *cc_ctx)
523 {
524 	uint32_t ppt_index;
525 	struct dp_spt_page_desc *spt_desc;
526 	int i = 0;
527 
528 	spt_desc = cc_ctx->page_desc_base;
529 	while (i < cc_ctx->total_page_num) {
530 		ppt_index = spt_desc[i].ppt_index;
531 		be_soc->page_desc_base[ppt_index].page_v_addr = NULL;
532 		i++;
533 	}
534 	return QDF_STATUS_SUCCESS;
535 }
536 #else
537 QDF_STATUS
538 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
539 			       struct dp_hw_cookie_conversion_t *cc_ctx)
540 {
541 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
542 	uint32_t ppt_index;
543 	struct dp_spt_page_desc *spt_desc;
544 	int i = 0;
545 
546 	spt_desc = cc_ctx->page_desc_base;
547 	while (i < cc_ctx->total_page_num) {
548 		/* reset PA in CMEM to NULL */
549 		dp_hw_cc_cmem_write(soc->hal_soc,
550 				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
551 				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
552 				    0);
553 
554 		ppt_index = spt_desc[i].ppt_index;
555 		be_soc->page_desc_base[ppt_index].page_v_addr = NULL;
556 		i++;
557 	}
558 	return QDF_STATUS_SUCCESS;
559 }
560 #endif
561 
562 #ifdef WLAN_SUPPORT_PPEDS
563 static QDF_STATUS dp_soc_ppeds_attach_be(struct dp_soc *soc)
564 {
565 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
566 	int target_type = hal_get_target_type(soc->hal_soc);
567 	struct cdp_ops *cdp_ops = soc->cdp_soc.ops;
568 
569 	/*
570 	 * Check if PPE DS is enabled and wlan soc supports it.
571 	 */
572 	if (!wlan_cfg_get_dp_soc_ppeds_enable(soc->wlan_cfg_ctx) ||
573 	    !dp_ppeds_target_supported(target_type))
574 		return QDF_STATUS_SUCCESS;
575 
576 	if (dp_ppeds_attach_soc_be(be_soc) != QDF_STATUS_SUCCESS)
577 		return QDF_STATUS_SUCCESS;
578 
579 	cdp_ops->ppeds_ops = &dp_ops_ppeds_be;
580 
581 	return QDF_STATUS_SUCCESS;
582 }
583 
584 static QDF_STATUS dp_soc_ppeds_detach_be(struct dp_soc *soc)
585 {
586 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
587 	struct cdp_ops *cdp_ops = soc->cdp_soc.ops;
588 
589 	if (!be_soc->ppeds_handle)
590 		return QDF_STATUS_E_FAILURE;
591 
592 	dp_ppeds_detach_soc_be(be_soc);
593 
594 	cdp_ops->ppeds_ops = NULL;
595 
596 	return QDF_STATUS_SUCCESS;
597 }
598 
599 static QDF_STATUS dp_peer_ppeds_default_route_be(struct dp_soc *soc,
600 						 struct dp_peer_be *be_peer,
601 						 uint8_t vdev_id,
602 						 uint16_t src_info)
603 {
604 	uint16_t service_code;
605 	uint8_t priority_valid;
606 	uint8_t use_ppe_ds = PEER_ROUTING_USE_PPE;
607 	uint8_t peer_routing_enabled = PEER_ROUTING_ENABLED;
608 	QDF_STATUS status = QDF_STATUS_SUCCESS;
609 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
610 	struct dp_vdev_be *be_vdev;
611 
612 	be_vdev = dp_get_be_vdev_from_dp_vdev(be_peer->peer.vdev);
613 
614 	/*
615 	 * Program service code bypass to avoid L2 new mac address
616 	 * learning exception when fdb learning is disabled.
617 	 */
618 	service_code = PPE_DRV_SC_SPF_BYPASS;
619 	priority_valid = be_peer->priority_valid;
620 
621 	/*
622 	 * if FST is enabled then let flow rule take the decision of
623 	 * routing the pkt to DS or host
624 	 */
625 	if (wlan_cfg_is_rx_flow_tag_enabled(cfg))
626 		use_ppe_ds = 0;
627 
628 	if (soc->cdp_soc.ol_ops->peer_set_ppeds_default_routing) {
629 		status =
630 		soc->cdp_soc.ol_ops->peer_set_ppeds_default_routing
631 				(soc->ctrl_psoc,
632 				be_peer->peer.mac_addr.raw,
633 				service_code, priority_valid,
634 				src_info, vdev_id, use_ppe_ds,
635 				peer_routing_enabled);
636 		if (status != QDF_STATUS_SUCCESS) {
637 			dp_err("vdev_id: %d, PPE peer routing mac: "
638 			       QDF_MAC_ADDR_FMT, vdev_id,
639 			       QDF_MAC_ADDR_REF(be_peer->peer.mac_addr.raw));
640 
641 			return QDF_STATUS_E_FAILURE;
642 		}
643 	}
644 
645 	return QDF_STATUS_SUCCESS;
646 }
647 
648 #ifdef WLAN_FEATURE_11BE_MLO
649 QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc,
650 				  struct dp_peer *peer,
651 				  struct dp_vdev_be *be_vdev,
652 				  void *args)
653 {
654 	struct dp_peer *mld_peer;
655 	struct dp_soc *mld_soc;
656 	struct dp_soc_be *be_soc;
657 	struct cdp_soc_t *cdp_soc;
658 	struct dp_peer_be *be_peer = dp_get_be_peer_from_dp_peer(peer);
659 	struct cdp_ds_vp_params vp_params = {0};
660 	struct dp_ppe_vp_profile *ppe_vp_profile = (struct dp_ppe_vp_profile *)args;
661 	uint16_t src_info = ppe_vp_profile->vp_num;
662 	uint8_t vdev_id = be_vdev->vdev.vdev_id;
663 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
664 
665 	if (!be_peer) {
666 		dp_err("BE peer is null");
667 		return QDF_STATUS_E_NULL_VALUE;
668 	}
669 
670 	if (IS_DP_LEGACY_PEER(peer)) {
671 		qdf_status = dp_peer_ppeds_default_route_be(soc, be_peer,
672 							    vdev_id, src_info);
673 	} else if (IS_MLO_DP_MLD_PEER(peer)) {
674 		int i;
675 		struct dp_peer *link_peer = NULL;
676 		struct dp_mld_link_peers link_peers_info;
677 
678 		/* get link peers with reference */
679 		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
680 						    DP_MOD_ID_DS);
681 
682 		for (i = 0; i < link_peers_info.num_links; i++) {
683 			link_peer = link_peers_info.link_peers[i];
684 			be_peer = dp_get_be_peer_from_dp_peer(link_peer);
685 			if (!be_peer) {
686 				dp_err("BE peer is null");
687 				continue;
688 			}
689 
690 			be_vdev = dp_get_be_vdev_from_dp_vdev(link_peer->vdev);
691 			if (!be_vdev) {
692 				dp_err("BE vap is null for peer id %d ",
693 				       link_peer->peer_id);
694 				continue;
695 			}
696 
697 			vdev_id = be_vdev->vdev.vdev_id;
698 			soc = link_peer->vdev->pdev->soc;
699 			qdf_status = dp_peer_ppeds_default_route_be(soc,
700 								    be_peer,
701 								    vdev_id,
702 								    src_info);
703 		}
704 
705 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_DS);
706 	} else {
707 		mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
708 
709 		if (!mld_peer)
710 			return qdf_status;
711 
712 		/*
713 		 * In case of an MLO link peer, fetch the
714 		 * VP profile from the mld vdev.
715 		 */
716 		be_vdev = dp_get_be_vdev_from_dp_vdev(mld_peer->vdev);
717 		if (!be_vdev) {
718 			dp_err("BE vap is null");
719 			return QDF_STATUS_E_NULL_VALUE;
720 		}
721 
722 		/*
723 		 * Extract the VP profile from the VAP. In case of an
724 		 * MLO peer, the profile must be fetched from the MLD
725 		 * vdev's osif handle and not from the link peer.
726 		 */
727 		mld_soc = mld_peer->vdev->pdev->soc;
728 		cdp_soc = &mld_soc->cdp_soc;
729 		if (!cdp_soc->ol_ops->get_ppeds_profile_info_for_vap) {
730 			dp_err("%pK: Register PPEDS profile info API before use", cdp_soc);
731 			return QDF_STATUS_E_NULL_VALUE;
732 		}
733 
734 		qdf_status = cdp_soc->ol_ops->get_ppeds_profile_info_for_vap(mld_soc->ctrl_psoc,
735 									     mld_peer->vdev->vdev_id,
736 									     &vp_params);
737 		if (qdf_status == QDF_STATUS_E_NULL_VALUE) {
738 			dp_err("%pK: Failed to get ppeds profile for mld soc", mld_soc);
739 			return qdf_status;
740 		}
741 
742 		/*
743 		 * Check if PPE DS routing is enabled on
744 		 * the associated vap.
745 		 */
746 		if (vp_params.ppe_vp_type != PPE_VP_USER_TYPE_DS)
747 			return qdf_status;
748 
749 		be_soc = dp_get_be_soc_from_dp_soc(mld_soc);
750 		ppe_vp_profile = &be_soc->ppe_vp_profile[vp_params.ppe_vp_profile_idx];
751 		src_info = ppe_vp_profile->vp_num;
752 
753 		qdf_status = dp_peer_ppeds_default_route_be(soc, be_peer,
754 							    vdev_id, src_info);
755 	}
756 
757 	return qdf_status;
758 }
759 #else
760 static QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc,
761 					 struct dp_peer *peer,
762 					 struct dp_vdev_be *be_vdev,
763 					 void *args)
764 {
765 	struct dp_ppe_vp_profile *vp_profile = (struct dp_ppe_vp_profile *)args;
766 	struct dp_peer_be *be_peer = dp_get_be_peer_from_dp_peer(peer);
767 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
768 
769 	if (!be_peer) {
770 		dp_err("BE peer is null");
771 		return QDF_STATUS_E_NULL_VALUE;
772 	}
773 
774 	qdf_status = dp_peer_ppeds_default_route_be(soc, be_peer,
775 						    be_vdev->vdev.vdev_id,
776 						    vp_profile->vp_num);
777 
778 	return qdf_status;
779 }
780 #endif
781 #else
782 static QDF_STATUS dp_ppeds_init_soc_be(struct dp_soc *soc)
783 {
784 	return QDF_STATUS_SUCCESS;
785 }
786 
787 static QDF_STATUS dp_ppeds_deinit_soc_be(struct dp_soc *soc)
788 {
789 	return QDF_STATUS_SUCCESS;
790 }
791 
792 static inline QDF_STATUS dp_soc_ppeds_attach_be(struct dp_soc *soc)
793 {
794 	return QDF_STATUS_SUCCESS;
795 }
796 
797 static inline QDF_STATUS dp_soc_ppeds_detach_be(struct dp_soc *soc)
798 {
799 	return QDF_STATUS_SUCCESS;
800 }
801 
802 QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer,
803 				  struct dp_vdev_be *be_vdev,
804 				  void *args)
805 {
806 	return QDF_STATUS_SUCCESS;
807 }
808 
809 static inline void dp_ppeds_stop_soc_be(struct dp_soc *soc)
810 {
811 }
812 #endif /* WLAN_SUPPORT_PPEDS */
813 
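/*
 * Free the DMA-coherent MLO and non-MLO REO queue reference tables.
 */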
814 void dp_reo_shared_qaddr_detach(struct dp_soc *soc)
815 {
816 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
817 				REO_QUEUE_REF_ML_TABLE_SIZE,
818 				soc->reo_qref.mlo_reo_qref_table_vaddr,
819 				soc->reo_qref.mlo_reo_qref_table_paddr, 0);
820 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
821 				REO_QUEUE_REF_NON_ML_TABLE_SIZE,
822 				soc->reo_qref.non_mlo_reo_qref_table_vaddr,
823 				soc->reo_qref.non_mlo_reo_qref_table_paddr, 0);
824 }
825 
826 static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
827 {
828 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
829 	dp_mlo_dev_obj_t mlo_dev_obj = dp_get_mlo_dev_list_obj(be_soc);
830 	int i = 0;
831 
832 	dp_soc_ppeds_detach_be(soc);
833 	dp_reo_shared_qaddr_detach(soc);
834 	dp_mlo_dev_ctxt_list_detach_wrapper(mlo_dev_obj);
835 
836 	for (i = 0; i < MAX_TXDESC_POOLS; i++)
837 		dp_hw_cookie_conversion_detach(be_soc,
838 					       &be_soc->tx_cc_ctx[i]);
839 
840 	for (i = 0; i < MAX_RXDESC_POOLS; i++)
841 		dp_hw_cookie_conversion_detach(be_soc,
842 					       &be_soc->rx_cc_ctx[i]);
843 
844 	qdf_mem_free(be_soc->page_desc_base);
845 	be_soc->page_desc_base = NULL;
846 
847 	return QDF_STATUS_SUCCESS;
848 }
849 
850 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
851 static void dp_set_rx_fst_be(struct dp_rx_fst *fst)
852 {
853 	struct dp_global_context *dp_global = wlan_objmgr_get_global_ctx();
854 
855 	if (dp_global)
856 		dp_global->fst_ctx = fst;
857 }
858 
859 static struct dp_rx_fst *dp_get_rx_fst_be(void)
860 {
861 	struct dp_global_context *dp_global = wlan_objmgr_get_global_ctx();
862 
863 	if (dp_global)
864 		return dp_global->fst_ctx;
865 
866 	return NULL;
867 }
868 
869 static uint32_t dp_rx_fst_release_ref_be(void)
870 {
871 	struct dp_global_context *dp_global = wlan_objmgr_get_global_ctx();
872 	uint32_t rx_fst_ref_cnt;
873 
874 	if (dp_global) {
875 		rx_fst_ref_cnt = qdf_atomic_read(&dp_global->rx_fst_ref_cnt);
876 		qdf_atomic_dec(&dp_global->rx_fst_ref_cnt);
877 		return rx_fst_ref_cnt;
878 	}
879 
880 	return 1;
881 }
882 
883 static void dp_rx_fst_get_ref_be(void)
884 {
885 	struct dp_global_context *dp_global = wlan_objmgr_get_global_ctx();
886 
887 	if (dp_global)
888 		qdf_atomic_inc(&dp_global->rx_fst_ref_cnt);
889 }
890 #else
891 static void dp_set_rx_fst_be(struct dp_rx_fst *fst)
892 {
893 }
894 
895 static struct dp_rx_fst *dp_get_rx_fst_be(void)
896 {
897 	return NULL;
898 }
899 
900 static uint32_t dp_rx_fst_release_ref_be(void)
901 {
902 	return 1;
903 }
904 
905 static void dp_rx_fst_get_ref_be(void)
906 {
907 }
908 #endif
909 
910 #ifdef WLAN_MLO_MULTI_CHIP
911 #ifdef WLAN_MCAST_MLO
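/*
 * On vdev attach the MLO mcast state defaults to non-primary and MLO
 * reinject routing is set to TQM notify; AP vdevs additionally route
 * mcast via the FW exception path.
 */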
912 static inline void
913 dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
914 {
915 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
916 
917 	be_vdev->mcast_primary = false;
918 
919 	hal_tx_mcast_mlo_reinject_routing_set(
920 				soc->hal_soc,
921 				HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY);
922 
923 	if (vdev->opmode == wlan_op_mode_ap) {
924 		hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
925 					   vdev->vdev_id,
926 					   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
927 	}
928 }
929 
930 static inline void
931 dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
932 {
933 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
934 
935 	be_vdev->mcast_primary = false;
936 	vdev->mlo_vdev = 0;
937 }
938 
939 #else
940 static inline void
941 dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
942 {
943 }
944 
945 static inline void
946 dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
947 {
948 }
949 #endif
950 static void dp_mlo_init_ptnr_list(struct dp_vdev *vdev)
951 {
952 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
953 
954 	qdf_mem_set(be_vdev->partner_vdev_list,
955 		    WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC,
956 		    CDP_INVALID_VDEV_ID);
957 	qdf_mem_set(be_vdev->bridge_vdev_list,
958 		    WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC,
959 		    CDP_INVALID_VDEV_ID);
960 }
961 
962 static void dp_get_rx_hash_key_be(struct dp_soc *soc,
963 				  struct cdp_lro_hash_config *lro_hash)
964 {
965 	dp_mlo_get_rx_hash_key(soc, lro_hash);
966 }
967 
968 #ifdef WLAN_DP_MLO_DEV_CTX
969 static inline void
970 dp_attach_vdev_list_in_mlo_dev_ctxt(struct dp_soc_be *be_soc,
971 				    struct dp_vdev *vdev,
972 				    struct dp_mlo_dev_ctxt *mlo_dev_ctxt)
973 {
974 	uint8_t pdev_id = vdev->pdev->pdev_id;
975 
976 	qdf_spin_lock_bh(&mlo_dev_ctxt->vdev_list_lock);
977 	if (vdev->is_bridge_vdev) {
978 		if (mlo_dev_ctxt->bridge_vdev[be_soc->mlo_chip_id][pdev_id]
979 		    != CDP_INVALID_VDEV_ID)
980 			dp_alert("bridge vdev_id in MLO dev ctxt is not invalid, "
981 				 "chip_id: %u, pdev_id: %u, "
982 				 "existing vdev_id: %u, new vdev_id: %u",
983 				 be_soc->mlo_chip_id, pdev_id,
984 				 mlo_dev_ctxt->bridge_vdev[be_soc->mlo_chip_id][pdev_id],
985 				 vdev->vdev_id);
986 
987 		mlo_dev_ctxt->bridge_vdev[be_soc->mlo_chip_id][pdev_id] =
988 								vdev->vdev_id;
989 		mlo_dev_ctxt->is_bridge_vdev_present = 1;
990 	} else {
991 		if (mlo_dev_ctxt->vdev_list[be_soc->mlo_chip_id][pdev_id]
992 		    != CDP_INVALID_VDEV_ID)
993 			dp_alert("vdev_id in MLO dev ctxt is not invalid, "
994 				 "chip_id: %u, pdev_id: %u, "
995 				 "existing vdev_id: %u, new vdev_id: %u",
996 				 be_soc->mlo_chip_id, pdev_id,
997 				 mlo_dev_ctxt->vdev_list[be_soc->mlo_chip_id][pdev_id],
998 				 vdev->vdev_id);
999 
1000 		mlo_dev_ctxt->vdev_list[be_soc->mlo_chip_id][pdev_id] =
1001 								vdev->vdev_id;
1002 	}
1003 	mlo_dev_ctxt->vdev_count++;
1004 	qdf_spin_unlock_bh(&mlo_dev_ctxt->vdev_list_lock);
1005 }
1006 
1007 static inline void
1008 dp_detach_vdev_list_in_mlo_dev_ctxt(struct dp_soc_be *be_soc,
1009 				    struct dp_vdev *vdev,
1010 				    struct dp_mlo_dev_ctxt *mlo_dev_ctxt)
1011 {
1012 	uint8_t pdev_id = vdev->pdev->pdev_id;
1013 
1014 	qdf_spin_lock_bh(&mlo_dev_ctxt->vdev_list_lock);
1015 	if (vdev->is_bridge_vdev) {
1016 		mlo_dev_ctxt->bridge_vdev[be_soc->mlo_chip_id][pdev_id] =
1017 							CDP_INVALID_VDEV_ID;
1018 	} else {
1019 		mlo_dev_ctxt->vdev_list[be_soc->mlo_chip_id][pdev_id] =
1020 							CDP_INVALID_VDEV_ID;
1021 	}
1022 	mlo_dev_ctxt->vdev_count--;
1023 	qdf_spin_unlock_bh(&mlo_dev_ctxt->vdev_list_lock);
1024 }
1025 #endif /* WLAN_DP_MLO_DEV_CTX */
1026 #else
1027 static inline void
1028 dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
1029 {
1030 }
1031 
1032 static inline void
1033 dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
1034 {
1035 }
1036 
1037 static void dp_mlo_init_ptnr_list(struct dp_vdev *vdev)
1038 {
1039 }
1040 
1041 static void dp_get_rx_hash_key_be(struct dp_soc *soc,
1042 				  struct cdp_lro_hash_config *lro_hash)
1043 {
1044 	dp_get_rx_hash_key_bytes(lro_hash);
1045 }
1046 
1047 #ifdef WLAN_DP_MLO_DEV_CTX
1048 static inline void
1049 dp_attach_vdev_list_in_mlo_dev_ctxt(struct dp_soc_be *be_soc,
1050 				    struct dp_vdev *vdev,
1051 				    struct dp_mlo_dev_ctxt *mlo_dev_ctxt)
1052 {
1053 }
1054 
1055 static inline void
1056 dp_detach_vdev_list_in_mlo_dev_ctxt(struct dp_soc_be *be_soc,
1057 				    struct dp_vdev *vdev,
1058 				    struct dp_mlo_dev_ctxt *mlo_dev_ctxt)
1059 {
1060 }
1061 #endif /* WLAN_DP_MLO_DEV_CTX */
1062 #endif
1063 
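/*
 * BE-specific SOC attach: allocate the SPT page descriptor base,
 * reserve CMEM for cookie conversion, fill MLO params, attach PPE-DS
 * (where supported) and set up the per-pool Tx/Rx cookie conversion
 * contexts. Any failure unwinds through dp_soc_detach_be().
 */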
1064 static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc,
1065 				   struct cdp_soc_attach_params *params)
1066 {
1067 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1068 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1069 	uint32_t max_tx_rx_desc_num, num_spt_pages;
1070 	uint32_t num_entries;
1071 	int i = 0;
1072 	dp_mlo_dev_obj_t mlo_dev_obj = dp_get_mlo_dev_list_obj(be_soc);
1073 
1074 	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
1075 		WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS +
1076 		WLAN_CFG_NUM_PPEDS_TX_DESC_MAX * MAX_PPE_TXDESC_POOLS;
1077 	/* estimate how many SPT DDR pages needed */
1078 	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
1079 	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
1080 					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
1081 
1082 	be_soc->page_desc_base = qdf_mem_malloc(
1083 		DP_CC_PPT_MAX_ENTRIES * sizeof(struct dp_spt_page_desc));
1084 	if (!be_soc->page_desc_base) {
1085 		dp_err("spt page descs allocation failed");
1086 		return QDF_STATUS_E_NOMEM;
1087 	}
1088 
1089 	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
1090 
1091 	qdf_status = dp_get_cmem_allocation(soc, COOKIE_CONVERSION);
1092 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1093 		goto fail;
1094 
1095 	dp_soc_mlo_fill_params(soc, params);
1096 
1097 	/* Initialize common cdp mlo ops */
1098 	dp_soc_initialize_cdp_cmn_mlo_ops(soc);
1099 
1100 	/* Initialize MLO device ctxt list */
1101 	dp_mlo_dev_ctxt_list_attach_wrapper(mlo_dev_obj);
1102 
1103 	qdf_status = dp_soc_ppeds_attach_be(soc);
1104 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1105 		goto fail;
1106 
1107 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
1108 		num_entries = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
1109 		qdf_status =
1110 			dp_hw_cookie_conversion_attach(be_soc,
1111 						       &be_soc->tx_cc_ctx[i],
1112 						       num_entries,
1113 						       QDF_DP_TX_DESC_TYPE, i);
1114 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1115 			goto fail;
1116 	}
1117 
1118 	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
1119 		num_entries =
1120 			wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
1121 		qdf_status =
1122 			dp_hw_cookie_conversion_attach(be_soc,
1123 						       &be_soc->rx_cc_ctx[i],
1124 						       num_entries,
1125 						       QDF_DP_RX_DESC_BUF_TYPE,
1126 						       i);
1127 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1128 			goto fail;
1129 	}
1130 
1131 	return qdf_status;
1132 fail:
1133 	dp_soc_detach_be(soc);
1134 	return qdf_status;
1135 }
1136 
1137 static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
1138 {
1139 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1140 	int i = 0;
1141 
1142 	qdf_atomic_set(&soc->cmn_init_done, 0);
1143 
1144 	dp_ppeds_stop_soc_be(soc);
1145 
1146 	dp_tx_deinit_bank_profiles(be_soc);
1147 	for (i = 0; i < MAX_TXDESC_POOLS; i++)
1148 		dp_hw_cookie_conversion_deinit(be_soc,
1149 					       &be_soc->tx_cc_ctx[i]);
1150 
1151 	for (i = 0; i < MAX_RXDESC_POOLS; i++)
1152 		dp_hw_cookie_conversion_deinit(be_soc,
1153 					       &be_soc->rx_cc_ctx[i]);
1154 
1155 	dp_ppeds_deinit_soc_be(soc);
1156 
1157 	return QDF_STATUS_SUCCESS;
1158 }
1159 
1160 static QDF_STATUS dp_soc_deinit_be_wrapper(struct dp_soc *soc)
1161 {
1162 	QDF_STATUS qdf_status;
1163 
1164 	qdf_status = dp_soc_deinit_be(soc);
1165 	if (QDF_IS_STATUS_ERROR(qdf_status))
1166 		return qdf_status;
1167 
1168 	dp_soc_deinit(soc);
1169 
1170 	return QDF_STATUS_SUCCESS;
1171 }
1172 
1173 static void *dp_soc_init_be(struct dp_soc *soc, HTC_HANDLE htc_handle,
1174 			    struct hif_opaque_softc *hif_handle)
1175 {
1176 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1177 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1178 	int i = 0;
1179 	void *ret_addr;
1180 
1181 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
1182 			  WLAN_MD_DP_SOC, "dp_soc");
1183 
1184 	soc->hif_handle = hif_handle;
1185 
1186 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
1187 	if (!soc->hal_soc)
1188 		return NULL;
1189 
1190 	dp_ppeds_init_soc_be(soc);
1191 
1192 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
1193 		qdf_status =
1194 			dp_hw_cookie_conversion_init(be_soc,
1195 						     &be_soc->tx_cc_ctx[i]);
1196 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1197 			goto fail;
1198 	}
1199 
1200 	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
1201 		qdf_status =
1202 			dp_hw_cookie_conversion_init(be_soc,
1203 						     &be_soc->rx_cc_ctx[i]);
1204 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1205 			goto fail;
1206 	}
1207 
1208 	/* route vdev_id mismatch notification via FW completion */
1209 	hal_tx_vdev_mismatch_routing_set(soc->hal_soc,
1210 					 HAL_TX_VDEV_MISMATCH_FW_NOTIFY);
1211 
1212 	qdf_status = dp_tx_init_bank_profiles(be_soc);
1213 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1214 		goto fail;
1215 
1216 	/* write WBM/REO cookie conversion CFG register */
1217 	dp_cc_reg_cfg_init(soc, true);
1218 
1219 	ret_addr = dp_soc_init(soc, htc_handle, hif_handle);
1220 	if (!ret_addr)
1221 		goto fail;
1222 
1223 	return ret_addr;
1224 fail:
1225 	dp_soc_deinit_be(soc);
1226 	return NULL;
1227 }
1228 
1229 static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev,
1230 				    struct cdp_pdev_attach_params *params)
1231 {
1232 	dp_pdev_mlo_fill_params(pdev, params);
1233 
1234 	return QDF_STATUS_SUCCESS;
1235 }
1236 
1237 static QDF_STATUS dp_pdev_detach_be(struct dp_pdev *pdev)
1238 {
1239 	dp_mlo_update_link_to_pdev_unmap(pdev->soc, pdev);
1240 
1241 	return QDF_STATUS_SUCCESS;
1242 }
1243 
1244 #ifdef INTRA_BSS_FWD_OFFLOAD
1245 static
1246 void dp_vdev_set_intra_bss(struct dp_soc *soc, uint16_t vdev_id, bool enable)
1247 {
1248 	soc->cdp_soc.ol_ops->vdev_set_intra_bss(soc->ctrl_psoc, vdev_id,
1249 						enable);
1250 }
1251 #else
1252 static
1253 void dp_vdev_set_intra_bss(struct dp_soc *soc, uint16_t vdev_id, bool enable)
1254 {
1255 }
1256 #endif
1257 
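/*
 * BE-specific vdev attach: assign a Tx bank profile, program the HW
 * mcast control for STA vdevs (FW exception when the pdev is isolated,
 * MEC notify otherwise), enable intra-BSS forwarding offload for
 * bridging APs, and initialize the MLO mcast state and partner list.
 */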
1258 static QDF_STATUS dp_vdev_attach_be(struct dp_soc *soc, struct dp_vdev *vdev)
1259 {
1260 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1261 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1262 	struct dp_pdev *pdev = vdev->pdev;
1263 
1264 	if (vdev->opmode == wlan_op_mode_monitor)
1265 		return QDF_STATUS_SUCCESS;
1266 
1267 	be_vdev->vdev_id_check_en = DP_TX_VDEV_ID_CHECK_ENABLE;
1268 
1269 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
1270 	vdev->bank_id = be_vdev->bank_id;
1271 
1272 	if (be_vdev->bank_id == DP_BE_INVALID_BANK_ID) {
1273 		QDF_BUG(0);
1274 		return QDF_STATUS_E_FAULT;
1275 	}
1276 
1277 	if (vdev->opmode == wlan_op_mode_sta) {
1278 		if (soc->cdp_soc.ol_ops->set_mec_timer)
1279 			soc->cdp_soc.ol_ops->set_mec_timer(
1280 					soc->ctrl_psoc,
1281 					vdev->vdev_id,
1282 					DP_AST_AGING_TIMER_DEFAULT_MS);
1283 
1284 		if (pdev->isolation)
1285 			hal_tx_vdev_mcast_ctrl_set(soc->hal_soc, vdev->vdev_id,
1286 						   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
1287 		else
1288 			hal_tx_vdev_mcast_ctrl_set(soc->hal_soc, vdev->vdev_id,
1289 						   HAL_TX_MCAST_CTRL_MEC_NOTIFY);
1290 	} else if (vdev->ap_bridge_enabled) {
1291 		dp_vdev_set_intra_bss(soc, vdev->vdev_id, true);
1292 	}
1293 
1294 	dp_mlo_mcast_init(soc, vdev);
1295 	dp_mlo_init_ptnr_list(vdev);
1296 
1297 	return QDF_STATUS_SUCCESS;
1298 }
1299 
1300 static QDF_STATUS dp_vdev_detach_be(struct dp_soc *soc, struct dp_vdev *vdev)
1301 {
1302 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1303 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1304 
1305 	if (vdev->opmode == wlan_op_mode_monitor)
1306 		return QDF_STATUS_SUCCESS;
1307 
1308 	if (vdev->opmode == wlan_op_mode_ap)
1309 		dp_mlo_mcast_deinit(soc, vdev);
1310 
1311 	dp_tx_put_bank_profile(be_soc, be_vdev);
1312 	dp_clr_mlo_ptnr_list(soc, vdev);
1313 
1314 	return QDF_STATUS_SUCCESS;
1315 }
1316 
1317 #ifdef WLAN_SUPPORT_PPEDS
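/*
 * Resolve the target (possibly MLD) peer and SOC for the given MAC,
 * fetch the PPE VP profile from the VAP's osif handle and, when the
 * VAP is of DS type, program PPE-DS default routing for the peer.
 */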
1318 static void dp_soc_txrx_peer_setup_be(struct dp_soc *soc, uint8_t vdev_id,
1319 				      uint8_t *peer_mac)
1320 {
1321 	struct dp_vdev_be *be_vdev;
1322 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1323 	struct dp_soc_be *be_soc;
1324 	struct cdp_ds_vp_params vp_params = {0};
1325 	struct cdp_soc_t *cdp_soc;
1326 	enum wlan_op_mode vdev_opmode;
1327 	struct dp_peer *peer;
1328 	struct dp_peer *tgt_peer = NULL;
1329 	struct dp_soc *tgt_soc = NULL;
1330 
1331 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id, DP_MOD_ID_CDP);
1332 	if (!peer)
1333 		return;
1334 	vdev_opmode = peer->vdev->opmode;
1335 
1336 	if (vdev_opmode != wlan_op_mode_ap &&
1337 	    vdev_opmode != wlan_op_mode_sta) {
1338 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1339 		return;
1340 	}
1341 
1342 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1343 	tgt_soc = tgt_peer->vdev->pdev->soc;
1344 	be_soc = dp_get_be_soc_from_dp_soc(tgt_soc);
1345 	cdp_soc = &tgt_soc->cdp_soc;
1346 
1347 	be_vdev = dp_get_be_vdev_from_dp_vdev(tgt_peer->vdev);
1348 	if (!be_vdev) {
1349 		qdf_err("BE vap is null");
1350 		qdf_status = QDF_STATUS_E_NULL_VALUE;
1351 		goto fail;
1352 	}
1353 
1354 	/*
1355 	 * Extract the VP profile from the VAP
1356 	 */
1357 	if (!cdp_soc->ol_ops->get_ppeds_profile_info_for_vap) {
1358 		dp_err("%pK: Register get ppeds profile info first", cdp_soc);
1359 		qdf_status = QDF_STATUS_E_NULL_VALUE;
1360 		goto fail;
1361 	}
1362 
1363 	/*
1364 	 * Check if PPE DS routing is enabled on the associated vap.
1365 	 */
1366 	qdf_status =
1367 	cdp_soc->ol_ops->get_ppeds_profile_info_for_vap(tgt_soc->ctrl_psoc,
1368 							tgt_peer->vdev->vdev_id,
1369 							&vp_params);
1370 	if (qdf_status == QDF_STATUS_E_NULL_VALUE) {
1371 		dp_err("%pK: Could not find ppeds profile info for vdev", be_vdev);
1372 		qdf_status = QDF_STATUS_E_NULL_VALUE;
1373 		goto fail;
1374 	}
1375 
1376 	if (vp_params.ppe_vp_type == PPE_VP_USER_TYPE_DS) {
1377 		qdf_status = dp_peer_setup_ppeds_be(tgt_soc, tgt_peer, be_vdev,
1378 						    (void *)&be_soc->ppe_vp_profile[vp_params.ppe_vp_profile_idx]);
1379 	}
1380 
1381 fail:
1382 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1383 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1384 		dp_err("Unable to do ppeds peer setup");
1385 		qdf_assert_always(0);
1386 	}
1387 }
1388 
1389 #else
1390 static inline
1391 void dp_soc_txrx_peer_setup_be(struct dp_soc *soc, uint8_t vdev_id,
1392 			       uint8_t *peer_mac)
1393 {
1394 }
1395 #endif
1396 
1397 static QDF_STATUS dp_peer_setup_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1398 				   uint8_t *peer_mac,
1399 				   struct cdp_peer_setup_info *setup_info)
1400 {
1401 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1402 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1403 
1404 	qdf_status = dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac,
1405 					 setup_info);
1406 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1407 		dp_err("Unable to do dp peer setup");
1408 		return qdf_status;
1409 	}
1410 
1411 	dp_soc_txrx_peer_setup_be(soc, vdev_id, peer_mac);
1412 
1413 	return QDF_STATUS_SUCCESS;
1414 }
1415 
1416 qdf_size_t dp_get_soc_context_size_be(void)
1417 {
1418 	return sizeof(struct dp_soc_be);
1419 }
1420 
1421 #ifdef CONFIG_WORD_BASED_TLV
1422 /**
1423  * dp_rxdma_ring_wmask_cfg_be() - Setup RXDMA ring word mask config
1424  * @soc: Common DP soc handle
1425  * @htt_tlv_filter: Rx SRNG TLV and filter setting
1426  *
1427  * Return: none
1428  */
1429 static inline void
1430 dp_rxdma_ring_wmask_cfg_be(struct dp_soc *soc,
1431 			   struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1432 {
1433 	htt_tlv_filter->rx_msdu_end_wmask =
1434 				 hal_rx_msdu_end_wmask_get(soc->hal_soc);
1435 	htt_tlv_filter->rx_mpdu_start_wmask =
1436 				 hal_rx_mpdu_start_wmask_get(soc->hal_soc);
1437 }
1438 #else
1439 static inline void
1440 dp_rxdma_ring_wmask_cfg_be(struct dp_soc *soc,
1441 			   struct htt_rx_ring_tlv_filter *htt_tlv_filter)
1442 {
1443 }
1444 #endif
1445 #ifdef WLAN_SUPPORT_PPEDS
1446 static
1447 void dp_free_ppeds_interrupts(struct dp_soc *soc, struct dp_srng *srng,
1448 			      int ring_type, int ring_num)
1449 {
1450 	if (srng->irq >= 0) {
1451 		qdf_dev_clear_irq_status_flags(srng->irq, IRQ_DISABLE_UNLAZY);
1452 		if (ring_type == WBM2SW_RELEASE &&
1453 		    ring_num == WBM2_SW_PPE_REL_RING_ID)
1454 			pld_pfrm_free_irq(soc->osdev->dev, srng->irq, soc);
1455 		else if (ring_type == REO2PPE || ring_type == PPE2TCL)
1456 			pld_pfrm_free_irq(soc->osdev->dev, srng->irq,
1457 					  dp_get_ppe_ds_ctxt(soc));
1458 	}
1459 }
1460 
1461 static
1462 int dp_register_ppeds_interrupts(struct dp_soc *soc, struct dp_srng *srng,
1463 				 int vector, int ring_type, int ring_num)
1464 {
1465 	int irq = -1, ret = 0;
1466 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1467 	int pci_slot = pld_get_pci_slot(soc->osdev->dev);
1468 
1469 	srng->irq = -1;
1470 	irq = pld_get_msi_irq(soc->osdev->dev, vector);
1471 	qdf_dev_set_irq_status_flags(irq, IRQ_DISABLE_UNLAZY);
1472 
1473 	if (ring_type == WBM2SW_RELEASE &&
1474 	    ring_num == WBM2_SW_PPE_REL_RING_ID) {
1475 		snprintf(be_soc->irq_name[2], DP_PPE_INTR_STRNG_LEN,
1476 			 "pci%d_ppe_wbm_rel", pci_slot);
1477 
1478 		ret = pld_pfrm_request_irq(soc->osdev->dev, irq,
1479 					   dp_ppeds_handle_tx_comp,
1480 					   IRQF_SHARED | IRQF_NO_SUSPEND,
1481 					   be_soc->irq_name[2], (void *)soc);
1482 
1483 		if (ret)
1484 			goto fail;
1485 	} else if (ring_type == REO2PPE && be_soc->ppeds_int_mode_enabled) {
1486 		snprintf(be_soc->irq_name[0], DP_PPE_INTR_STRNG_LEN,
1487 			 "pci%d_reo2ppe", pci_slot);
1488 		ret = pld_pfrm_request_irq(soc->osdev->dev, irq,
1489 					   dp_ppe_ds_reo2ppe_irq_handler,
1490 					   IRQF_SHARED | IRQF_NO_SUSPEND,
1491 					   be_soc->irq_name[0],
1492 					   dp_get_ppe_ds_ctxt(soc));
1493 
1494 		if (ret)
1495 			goto fail;
1496 	} else if (ring_type == PPE2TCL && be_soc->ppeds_int_mode_enabled) {
1497 		snprintf(be_soc->irq_name[1], DP_PPE_INTR_STRNG_LEN,
1498 			 "pci%d_ppe2tcl", pci_slot);
1499 		ret = pld_pfrm_request_irq(soc->osdev->dev, irq,
1500 					   dp_ppe_ds_ppe2tcl_irq_handler,
1501 					   IRQF_NO_SUSPEND,
1502 					   be_soc->irq_name[1],
1503 					   dp_get_ppe_ds_ctxt(soc));
1504 		if (ret)
1505 			goto fail;
1506 
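		/*
		 * Keep the ppe2tcl IRQ disabled for now; it is presumably
		 * re-enabled via dp_ppeds_enable_irq() once PPE-DS Tx is
		 * actually started.
		 */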
1507 		pld_pfrm_disable_irq_nosync(soc->osdev->dev, irq);
1508 	} else {
1509 		return 0;
1510 	}
1511 
1512 	srng->irq = irq;
1513 
1514 	dp_info("Registered irq %d for soc %pK ring type %d",
1515 		irq, soc, ring_type);
1516 
1517 	return 0;
1518 fail:
1519 	dp_err("Unable to config irq: ring type %d irq %d vector %d",
1520 	       ring_type, irq, vector);
1521 	qdf_dev_clear_irq_status_flags(irq, IRQ_DISABLE_UNLAZY);
1522 
1523 	return ret;
1524 }
1525 
1526 void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng)
1527 {
1528 	if (srng->irq >= 0)
1529 		pld_pfrm_disable_irq_nosync(soc->osdev->dev, srng->irq);
1530 }
1531 
1532 void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng)
1533 {
1534 	if (srng->irq >= 0)
1535 		pld_pfrm_enable_irq(soc->osdev->dev, srng->irq);
1536 }
1537 #endif
1538 
1539 #ifdef NO_RX_PKT_HDR_TLV
1540 /**
1541  * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
1542  * @soc: Common DP soc handle
1543  *
1544  * Return: QDF_STATUS
1545  */
1546 static QDF_STATUS
1547 dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
1548 {
1549 	int i;
1550 	int mac_id;
1551 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
1552 	struct dp_srng *rx_mac_srng;
1553 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1554 
1555 	/*
1556 	 * In the Beryllium chipset, the msdu_start, mpdu_end
1557 	 * and rx_attn TLVs are part of msdu_end/mpdu_start
1558 	 */
1559 	htt_tlv_filter.msdu_start = 0;
1560 	htt_tlv_filter.mpdu_end = 0;
1561 	htt_tlv_filter.attention = 0;
1562 	htt_tlv_filter.mpdu_start = 1;
1563 	htt_tlv_filter.msdu_end = 1;
1564 	htt_tlv_filter.packet = 1;
1565 	htt_tlv_filter.packet_header = 0;
1566 
1567 	htt_tlv_filter.ppdu_start = 0;
1568 	htt_tlv_filter.ppdu_end = 0;
1569 	htt_tlv_filter.ppdu_end_user_stats = 0;
1570 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
1571 	htt_tlv_filter.ppdu_end_status_done = 0;
1572 	htt_tlv_filter.enable_fp = 1;
1573 	htt_tlv_filter.enable_md = 0;
1575 	htt_tlv_filter.enable_mo = 0;
1576 
1577 	htt_tlv_filter.fp_mgmt_filter = 0;
1578 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
1579 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
1580 					 FILTER_DATA_DATA);
1581 	htt_tlv_filter.fp_data_filter |=
1582 		hal_rx_en_mcast_fp_data_filter(soc->hal_soc) ?
1583 					FILTER_DATA_MCAST : 0;
1584 	htt_tlv_filter.mo_mgmt_filter = 0;
1585 	htt_tlv_filter.mo_ctrl_filter = 0;
1586 	htt_tlv_filter.mo_data_filter = 0;
1587 	htt_tlv_filter.md_data_filter = 0;
1588 
1589 	htt_tlv_filter.offset_valid = true;
1590 
1591 	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
1592 	htt_tlv_filter.rx_mpdu_end_offset = 0;
1593 	htt_tlv_filter.rx_msdu_start_offset = 0;
1594 	htt_tlv_filter.rx_attn_offset = 0;
1595 
1596 	/*
1597 	 * For monitor mode, the packet hdr tlv is enabled later during
1598 	 * filter update
1599 	 */
1600 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1601 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)
1602 		htt_tlv_filter.rx_packet_offset = soc->rx_mon_pkt_tlv_size;
1603 	else
1604 		htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
1605 
1606 	/* Not subscribing to rx_pkt_header */
1607 	htt_tlv_filter.rx_header_offset = 0;
1608 	htt_tlv_filter.rx_mpdu_start_offset =
1609 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
1610 	htt_tlv_filter.rx_msdu_end_offset =
1611 				hal_rx_msdu_end_offset_get(soc->hal_soc);
1612 
1613 	dp_rxdma_ring_wmask_cfg_be(soc, &htt_tlv_filter);
1614 
1615 	for (i = 0; i < MAX_PDEV_CNT; i++) {
1616 		struct dp_pdev *pdev = soc->pdev_list[i];
1617 
1618 		if (!pdev)
1619 			continue;
1620 
1621 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1622 			int mac_for_pdev =
1623 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
1624 			/*
1625 			 * Obtain lmac id from pdev to access the LMAC ring
1626 			 * in soc context
1627 			 */
1628 			int lmac_id =
1629 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
1630 							   pdev->pdev_id);
1631 
1632 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
1633 
1634 			if (!rx_mac_srng->hal_srng)
1635 				continue;
1636 
1637 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
1638 					    rx_mac_srng->hal_srng,
1639 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
1640 					    &htt_tlv_filter);
1641 		}
1642 	}
1643 	return status;
1644 }
1645 #else
1646 /**
1647  * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
1648  * @soc: Common DP soc handle
1649  *
1650  * Return: QDF_STATUS
1651  */
1652 static QDF_STATUS
1653 dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
1654 {
1655 	int i;
1656 	int mac_id;
1657 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
1658 	struct dp_srng *rx_mac_srng;
1659 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1660 
1661 	/*
1662 	 * In the Beryllium chipset, the msdu_start, mpdu_end
1663 	 * and rx_attn TLVs are part of msdu_end/mpdu_start
1664 	 */
1665 	htt_tlv_filter.msdu_start = 0;
1666 	htt_tlv_filter.mpdu_end = 0;
1667 	htt_tlv_filter.attention = 0;
1668 	htt_tlv_filter.mpdu_start = 1;
1669 	htt_tlv_filter.msdu_end = 1;
1670 	htt_tlv_filter.packet = 1;
1671 	htt_tlv_filter.packet_header = 1;
1672 
1673 	htt_tlv_filter.ppdu_start = 0;
1674 	htt_tlv_filter.ppdu_end = 0;
1675 	htt_tlv_filter.ppdu_end_user_stats = 0;
1676 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
1677 	htt_tlv_filter.ppdu_end_status_done = 0;
1678 	htt_tlv_filter.enable_fp = 1;
1679 	htt_tlv_filter.enable_md = 0;
1681 	htt_tlv_filter.enable_mo = 0;
1682 
1683 	htt_tlv_filter.fp_mgmt_filter = 0;
1684 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
1685 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
1686 					 FILTER_DATA_DATA);
1687 	htt_tlv_filter.fp_data_filter |=
1688 		hal_rx_en_mcast_fp_data_filter(soc->hal_soc) ?
1689 					FILTER_DATA_MCAST : 0;
1690 	htt_tlv_filter.mo_mgmt_filter = 0;
1691 	htt_tlv_filter.mo_ctrl_filter = 0;
1692 	htt_tlv_filter.mo_data_filter = 0;
1693 	htt_tlv_filter.md_data_filter = 0;
1694 
1695 	htt_tlv_filter.offset_valid = true;
1696 
1697 	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
1698 	htt_tlv_filter.rx_mpdu_end_offset = 0;
1699 	htt_tlv_filter.rx_msdu_start_offset = 0;
1700 	htt_tlv_filter.rx_attn_offset = 0;
1701 
1702 	/*
1703 	 * For monitor mode, the packet hdr tlv is enabled later during
1704 	 * filter update
1705 	 */
1706 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1707 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)
1708 		htt_tlv_filter.rx_packet_offset = soc->rx_mon_pkt_tlv_size;
1709 	else
1710 		htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
1711 
1712 	htt_tlv_filter.rx_header_offset =
1713 				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
1714 	htt_tlv_filter.rx_mpdu_start_offset =
1715 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
1716 	htt_tlv_filter.rx_msdu_end_offset =
1717 				hal_rx_msdu_end_offset_get(soc->hal_soc);
1718 
1719 	dp_info("TLV subscription\n"
1720 		"msdu_start %d, mpdu_end %d, attention %d"
1721 		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
1722 		"TLV offsets\n"
1723 		"msdu_start %d, mpdu_end %d, attention %d"
1724 		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
1725 		htt_tlv_filter.msdu_start,
1726 		htt_tlv_filter.mpdu_end,
1727 		htt_tlv_filter.attention,
1728 		htt_tlv_filter.mpdu_start,
1729 		htt_tlv_filter.msdu_end,
1730 		htt_tlv_filter.packet_header,
1731 		htt_tlv_filter.packet,
1732 		htt_tlv_filter.rx_msdu_start_offset,
1733 		htt_tlv_filter.rx_mpdu_end_offset,
1734 		htt_tlv_filter.rx_attn_offset,
1735 		htt_tlv_filter.rx_mpdu_start_offset,
1736 		htt_tlv_filter.rx_msdu_end_offset,
1737 		htt_tlv_filter.rx_header_offset,
1738 		htt_tlv_filter.rx_packet_offset);
1739 
1740 	dp_rxdma_ring_wmask_cfg_be(soc, &htt_tlv_filter);
1741 	for (i = 0; i < MAX_PDEV_CNT; i++) {
1742 		struct dp_pdev *pdev = soc->pdev_list[i];
1743 
1744 		if (!pdev)
1745 			continue;
1746 
1747 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1748 			int mac_for_pdev =
1749 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
1750 			/*
1751 			 * Obtain lmac id from pdev to access the LMAC ring
1752 			 * in soc context
1753 			 */
1754 			int lmac_id =
1755 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
1756 							   pdev->pdev_id);
1757 
1758 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
1759 
1760 			if (!rx_mac_srng->hal_srng)
1761 				continue;
1762 
1763 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
1764 					    rx_mac_srng->hal_srng,
1765 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
1766 					    &htt_tlv_filter);
1767 		}
1768 	}
1769 	return status;
1770 
1771 }
1772 #endif
1773 
1774 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1775 /**
1776  * dp_service_near_full_srngs_be() - Main bottom half callback for the
1777  *				near-full IRQs.
1778  * @soc: Datapath SoC handle
1779  * @int_ctx: Interrupt context
1780  * @dp_budget: Budget of the work that can be done in the bottom half
1781  *
1782  * Return: work done in the handler
1783  */
1784 static uint32_t
1785 dp_service_near_full_srngs_be(struct dp_soc *soc, struct dp_intr *int_ctx,
1786 			      uint32_t dp_budget)
1787 {
1788 	int ring = 0;
1789 	int budget = dp_budget;
1790 	uint32_t work_done  = 0;
1791 	uint32_t remaining_quota = dp_budget;
1792 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1793 	int tx_ring_near_full_mask = int_ctx->tx_ring_near_full_mask;
1794 	int rx_near_full_grp_1_mask = int_ctx->rx_near_full_grp_1_mask;
1795 	int rx_near_full_grp_2_mask = int_ctx->rx_near_full_grp_2_mask;
1796 	int rx_near_full_mask = rx_near_full_grp_1_mask |
1797 				rx_near_full_grp_2_mask;
1798 
1799 	dp_verbose_debug("rx_ring_near_full 0x%x tx_ring_near_full 0x%x",
1800 			 rx_near_full_mask,
1801 			 tx_ring_near_full_mask);
1802 
1803 	if (rx_near_full_mask) {
1804 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1805 			if (!(rx_near_full_mask & (1 << ring)))
1806 				continue;
1807 
1808 			work_done = dp_rx_nf_process(int_ctx,
1809 					soc->reo_dest_ring[ring].hal_srng,
1810 					ring, remaining_quota);
1811 			if (work_done) {
1812 				intr_stats->num_rx_ring_near_full_masks[ring]++;
1813 				dp_verbose_debug("rx NF mask 0x%x ring %d, work_done %d budget %d",
1814 						 rx_near_full_mask, ring,
1815 						 work_done,
1816 						 budget);
1817 				budget -= work_done;
1818 				if (budget <= 0)
1819 					goto budget_done;
1820 				remaining_quota = budget;
1821 			}
1822 		}
1823 	}
1824 
1825 	if (tx_ring_near_full_mask) {
1826 		for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
1827 			if (!(tx_ring_near_full_mask & (1 << ring)))
1828 				continue;
1829 
1830 			work_done = dp_tx_comp_nf_handler(int_ctx, soc,
1831 					soc->tx_comp_ring[ring].hal_srng,
1832 					ring, remaining_quota);
1833 			if (work_done) {
1834 				intr_stats->num_tx_comp_ring_near_full_masks[ring]++;
1835 				dp_verbose_debug("tx NF mask 0x%x ring %d, work_done %d budget %d",
1836 						 tx_ring_near_full_mask, ring,
1837 						 work_done, budget);
1838 				budget -= work_done;
1839 				if (budget <= 0)
1840 					break;
1841 				remaining_quota = budget;
1842 			}
1843 		}
1844 	}
1845 
1846 	intr_stats->num_near_full_masks++;
1847 
1848 budget_done:
1849 	return dp_budget - budget;
1850 }
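
/*
 * Illustrative sketch (not from the driver): how the budget accounting in
 * dp_service_near_full_srngs_be() plays out. Assuming dp_budget = 64 and
 * two near-full REO rings (mask 0x5), with example work_done values:
 *
 *	work_done = dp_rx_nf_process(int_ctx, srng0, 0, 64);	// e.g. 40
 *	budget -= work_done;					// 24 left
 *	remaining_quota = budget;				// ring 2 gets 24
 *	work_done = dp_rx_nf_process(int_ctx, srng2, 2, 24);	// e.g. 24
 *	budget -= work_done;					// 0 -> budget_done
 *
 * The handler then returns dp_budget - budget, i.e. the total work done.
 */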
1851 
1852 /**
1853  * dp_srng_test_and_update_nf_params_be() - Check if the srng is in near full
1854  *				state and set the reap_limit appropriately
1855  *				as per the near full state
1856  * @soc: Datapath soc handle
1857  * @dp_srng: Datapath handle for SRNG
1858  * @max_reap_limit: [Output Buffer] Buffer to set the max reap limit as per
1859  *			the srng near-full state
1860  *
1861  * Return: 1, if the srng is in near-full state
1862  *	   0, if the srng is not in near-full state
1863  */
1864 static int
1865 dp_srng_test_and_update_nf_params_be(struct dp_soc *soc,
1866 				     struct dp_srng *dp_srng,
1867 				     int *max_reap_limit)
1868 {
1869 	return _dp_srng_test_and_update_nf_params(soc, dp_srng, max_reap_limit);
1870 }
1871 
1872 /**
1873  * dp_init_near_full_arch_ops_be() - Initialize the arch ops handler for the
1874  *			near full IRQ handling operations.
1875  * @arch_ops: arch ops handle
1876  *
1877  * Return: none
1878  */
1879 static inline void
1880 dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
1881 {
1882 	arch_ops->dp_service_near_full_srngs = dp_service_near_full_srngs_be;
1883 	arch_ops->dp_srng_test_and_update_nf_params =
1884 					dp_srng_test_and_update_nf_params_be;
1885 }
1886 
1887 #else
1888 static inline void
1889 dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
1890 {
1891 }
1892 #endif
1893 
1894 static inline
1895 QDF_STATUS dp_srng_init_be(struct dp_soc *soc, struct dp_srng *srng,
1896 			   int ring_type, int ring_num, int mac_id)
1897 {
1898 	return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
1899 }
1900 
1901 #ifdef WLAN_SUPPORT_PPEDS
1902 static void dp_soc_ppeds_srng_deinit(struct dp_soc *soc)
1903 {
1904 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1905 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1906 
1907 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1908 
1909 	if (!be_soc->ppeds_handle)
1910 		return;
1911 
1912 	dp_srng_deinit(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0);
1913 	wlan_minidump_remove(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
1914 			     be_soc->ppe2tcl_ring.alloc_size,
1915 			     soc->ctrl_psoc,
1916 			     WLAN_MD_DP_SRNG_PPE2TCL,
1917 			     "ppe2tcl_ring");
1918 
1919 	dp_srng_deinit(soc, &be_soc->reo2ppe_ring, REO2PPE, 0);
1920 	wlan_minidump_remove(be_soc->reo2ppe_ring.base_vaddr_unaligned,
1921 			     be_soc->reo2ppe_ring.alloc_size,
1922 			     soc->ctrl_psoc,
1923 			     WLAN_MD_DP_SRNG_REO2PPE,
1924 			     "reo2ppe_ring");
1925 
1926 	dp_srng_deinit(soc, &be_soc->ppeds_wbm_release_ring, WBM2SW_RELEASE,
1927 		       WBM2_SW_PPE_REL_RING_ID);
1928 	wlan_minidump_remove(be_soc->ppeds_wbm_release_ring.base_vaddr_unaligned,
1929 			     be_soc->ppeds_wbm_release_ring.alloc_size,
1930 			     soc->ctrl_psoc,
1931 			     WLAN_MD_DP_SRNG_PPE_WBM2SW_RELEASE,
1932 			     "ppeds_wbm_release_ring");
1934 }
1935 
1936 static void dp_soc_ppeds_srng_free(struct dp_soc *soc)
1937 {
1938 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1939 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1940 
1941 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1942 
1943 	dp_srng_free(soc, &be_soc->ppeds_wbm_release_ring);
1944 
1945 	dp_srng_free(soc, &be_soc->ppe2tcl_ring);
1946 
1947 	dp_srng_free(soc, &be_soc->reo2ppe_ring);
1948 }
1949 
1950 static QDF_STATUS dp_soc_ppeds_srng_alloc(struct dp_soc *soc)
1951 {
1952 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1953 	uint32_t entries;
1954 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1955 
1956 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1957 
1958 	if (!be_soc->ppeds_handle)
1959 		return QDF_STATUS_SUCCESS;
1960 
1961 	entries = wlan_cfg_get_dp_soc_reo2ppe_ring_size(soc_cfg_ctx);
1962 
1963 	if (dp_srng_alloc(soc, &be_soc->reo2ppe_ring, REO2PPE,
1964 			  entries, 0)) {
1965 		dp_err("%pK: dp_srng_alloc failed for reo2ppe", soc);
1966 		goto fail;
1967 	}
1968 
1969 	entries = wlan_cfg_get_dp_soc_ppe2tcl_ring_size(soc_cfg_ctx);
1970 	if (dp_srng_alloc(soc, &be_soc->ppe2tcl_ring, PPE2TCL,
1971 			  entries, 0)) {
1972 		dp_err("%pK: dp_srng_alloc failed for ppe2tcl_ring", soc);
1973 		goto fail;
1974 	}
1975 
1976 	entries = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
1977 	if (dp_srng_alloc(soc, &be_soc->ppeds_wbm_release_ring, WBM2SW_RELEASE,
1978 			  entries, 1)) {
1979 		dp_err("%pK: dp_srng_alloc failed for ppeds_wbm_release_ring",
1980 		       soc);
1981 		goto fail;
1982 	}
1983 
1984 	return QDF_STATUS_SUCCESS;
1985 fail:
1986 	dp_soc_ppeds_srng_free(soc);
1987 	return QDF_STATUS_E_NOMEM;
1988 }
1989 
1990 static QDF_STATUS dp_soc_ppeds_srng_init(struct dp_soc *soc)
1991 {
1992 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1993 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1994 	hal_soc_handle_t hal_soc = soc->hal_soc;
1995 
1996 	struct dp_ppe_ds_idxs idx = {0};
1997 
1998 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1999 
2000 	if (!be_soc->ppeds_handle)
2001 		return QDF_STATUS_SUCCESS;
2002 
2003 	if (dp_ppeds_register_soc_be(be_soc, &idx)) {
2004 		dp_err("%pK: ppeds registration failed", soc);
2005 		goto fail;
2006 	}
2007 
2008 	if (dp_srng_init_idx(soc, &be_soc->reo2ppe_ring, REO2PPE, 0, 0,
2009 			     idx.reo2ppe_start_idx)) {
2010 		dp_err("%pK: dp_srng_init failed for reo2ppe", soc);
2011 		goto fail;
2012 	}
2013 
2014 	wlan_minidump_log(be_soc->reo2ppe_ring.base_vaddr_unaligned,
2015 			  be_soc->reo2ppe_ring.alloc_size,
2016 			  soc->ctrl_psoc,
2017 			  WLAN_MD_DP_SRNG_REO2PPE,
2018 			  "reo2ppe_ring");
2019 
2020 	hal_reo_config_reo2ppe_dest_info(hal_soc);
2021 
2022 	if (dp_srng_init_idx(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0, 0,
2023 			     idx.ppe2tcl_start_idx)) {
2024 		dp_err("%pK: dp_srng_init failed for ppe2tcl_ring", soc);
2025 		goto fail;
2026 	}
2027 
2028 	wlan_minidump_log(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
2029 			  be_soc->ppe2tcl_ring.alloc_size,
2030 			  soc->ctrl_psoc,
2031 			  WLAN_MD_DP_SRNG_PPE2TCL,
2032 			  "ppe2tcl_ring");
2033 
2034 	hal_tx_config_rbm_mapping_be(soc->hal_soc,
2035 				     be_soc->ppe2tcl_ring.hal_srng,
2036 				     WBM2_SW_PPE_REL_MAP_ID);
2037 
2038 	if (dp_srng_init(soc, &be_soc->ppeds_wbm_release_ring, WBM2SW_RELEASE,
2039 			 WBM2_SW_PPE_REL_RING_ID, 0)) {
2040 		dp_err("%pK: dp_srng_init failed for ppeds_wbm_release_ring",
2041 		       soc);
2042 		goto fail;
2043 	}
2044 
2045 	wlan_minidump_log(be_soc->ppeds_wbm_release_ring.base_vaddr_unaligned,
2046 			  be_soc->ppeds_wbm_release_ring.alloc_size,
2047 			  soc->ctrl_psoc, WLAN_MD_DP_SRNG_PPE_WBM2SW_RELEASE,
2048 			  "ppeds_wbm_release_ring");
2049 
2050 	return QDF_STATUS_SUCCESS;
2051 fail:
2052 	dp_soc_ppeds_srng_deinit(soc);
2053 	return QDF_STATUS_E_NOMEM;
2054 }
2055 #else
2056 static void dp_soc_ppeds_srng_deinit(struct dp_soc *soc)
2057 {
2058 }
2059 
2060 static void dp_soc_ppeds_srng_free(struct dp_soc *soc)
2061 {
2062 }
2063 
2064 static QDF_STATUS dp_soc_ppeds_srng_alloc(struct dp_soc *soc)
2065 {
2066 	return QDF_STATUS_SUCCESS;
2067 }
2068 
2069 static QDF_STATUS dp_soc_ppeds_srng_init(struct dp_soc *soc)
2070 {
2071 	return QDF_STATUS_SUCCESS;
2072 }
2073 #endif
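
/*
 * Illustrative pairing sketch (not from the driver): how the PPEDS ring
 * helpers above are expected to be sequenced by the SoC SRNG code. Each
 * failure path tears down only what was set up:
 *
 *	if (dp_soc_ppeds_srng_alloc(soc))	// reo2ppe/ppe2tcl/wbm alloc
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_soc_ppeds_srng_init(soc)) {	// init, minidump, RBM map
 *		dp_soc_ppeds_srng_free(soc);
 *		return QDF_STATUS_E_NOMEM;
 *	}
 *	...
 *	dp_soc_ppeds_srng_deinit(soc);		// teardown in reverse
 *	dp_soc_ppeds_srng_free(soc);
 */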
2074 
2075 static void dp_soc_srng_deinit_be(struct dp_soc *soc)
2076 {
2077 	uint32_t i;
2078 
2079 	dp_soc_ppeds_srng_deinit(soc);
2080 
2081 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
2082 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
2083 			dp_srng_deinit(soc, &soc->rx_refill_buf_ring[i],
2084 				       RXDMA_BUF, 0);
2085 		}
2086 	}
2087 }
2088 
2089 static void dp_soc_srng_free_be(struct dp_soc *soc)
2090 {
2091 	uint32_t i;
2092 
2093 	dp_soc_ppeds_srng_free(soc);
2094 
2095 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
2096 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++)
2097 			dp_srng_free(soc, &soc->rx_refill_buf_ring[i]);
2098 	}
2099 }
2100 
2101 static QDF_STATUS dp_soc_srng_alloc_be(struct dp_soc *soc)
2102 {
2103 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2104 	uint32_t ring_size;
2105 	uint32_t i;
2106 
2107 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2108 
2109 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
2110 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
2111 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
2112 			if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[i],
2113 					  RXDMA_BUF, ring_size, 0)) {
2114 				dp_err("%pK: dp_srng_alloc failed refill ring",
2115 				       soc);
2116 				goto fail;
2117 			}
2118 		}
2119 	}
2120 
2121 	if (dp_soc_ppeds_srng_alloc(soc)) {
2122 		dp_err("%pK: ppe rings alloc failed",
2123 		       soc);
2124 		goto fail;
2125 	}
2126 
2127 	return QDF_STATUS_SUCCESS;
2128 fail:
2129 	dp_soc_srng_free_be(soc);
2130 	return QDF_STATUS_E_NOMEM;
2131 }
2132 
2133 static QDF_STATUS dp_soc_srng_init_be(struct dp_soc *soc)
2134 {
2135 	int i = 0;
2136 
2137 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
2138 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
2139 			if (dp_srng_init(soc, &soc->rx_refill_buf_ring[i],
2140 					 RXDMA_BUF, 0, 0)) {
2141 				dp_err("%pK: dp_srng_init failed refill ring",
2142 				       soc);
2143 				goto fail;
2144 			}
2145 		}
2146 	}
2147 
2148 	if (dp_soc_ppeds_srng_init(soc)) {
2149 		dp_err("%pK: ppe ds rings init failed",
2150 		       soc);
2151 		goto fail;
2152 	}
2153 
2154 	return QDF_STATUS_SUCCESS;
2155 fail:
2156 	dp_soc_srng_deinit_be(soc);
2157 	return QDF_STATUS_E_NOMEM;
2158 }
2159 
2160 #ifdef WLAN_FEATURE_11BE_MLO
2161 static inline unsigned
2162 dp_mlo_peer_find_hash_index(dp_mld_peer_hash_obj_t mld_hash_obj,
2163 			    union dp_align_mac_addr *mac_addr)
2164 {
2165 	uint32_t index;
2166 
2167 	index =
2168 		mac_addr->align2.bytes_ab ^
2169 		mac_addr->align2.bytes_cd ^
2170 		mac_addr->align2.bytes_ef;
2171 
2172 	index ^= index >> mld_hash_obj->mld_peer_hash.idx_bits;
2173 	index &= mld_hash_obj->mld_peer_hash.mask;
2174 
2175 	return index;
2176 }
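
/*
 * Worked example (illustrative): for MLD MAC 00:11:22:33:44:55 the aligned
 * 16-bit words would be 0x1100, 0x3322 and 0x5544 (the byte order is an
 * assumption of this sketch). With idx_bits = 7 and mask = 0x7f:
 *
 *	index = 0x1100 ^ 0x3322 ^ 0x5544;	// 0x7766
 *	index ^= index >> 7;			// 0x7766 ^ 0xee = 0x7788
 *	index &= 0x7f;				// bin 0x08
 */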
2177 
2178 QDF_STATUS
2179 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
2180 				int hash_elems)
2181 {
2182 	int i, log2;
2183 
2184 	if (!mld_hash_obj)
2185 		return QDF_STATUS_E_FAILURE;
2186 
2187 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
2188 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
2189 	log2 = dp_log2_ceil(hash_elems);
2190 	hash_elems = 1 << log2;
2191 
2192 	mld_hash_obj->mld_peer_hash.mask = hash_elems - 1;
2193 	mld_hash_obj->mld_peer_hash.idx_bits = log2;
2194 	/* allocate an array of TAILQ peer object lists */
2195 	mld_hash_obj->mld_peer_hash.bins = qdf_mem_malloc(
2196 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
2197 	if (!mld_hash_obj->mld_peer_hash.bins)
2198 		return QDF_STATUS_E_NOMEM;
2199 
2200 	for (i = 0; i < hash_elems; i++)
2201 		TAILQ_INIT(&mld_hash_obj->mld_peer_hash.bins[i]);
2202 
2203 	qdf_spinlock_create(&mld_hash_obj->mld_peer_hash_lock);
2204 
2205 	return QDF_STATUS_SUCCESS;
2206 }
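
/*
 * Sizing sketch (illustrative): with hash_elems = 1024 and assuming
 * DP_PEER_HASH_LOAD_MULT = 2 and DP_PEER_HASH_LOAD_SHIFT = 0, the bin
 * count is scaled and rounded up to a power of two:
 *
 *	hash_elems = (1024 * 2) >> 0;	// 2048
 *	log2 = dp_log2_ceil(2048);	// 11
 *	hash_elems = 1 << 11;		// 2048 bins
 *	mask = 0x7ff, idx_bits = 11
 */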
2207 
2208 void
2209 dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj)
2210 {
2211 	if (!mld_hash_obj)
2212 		return;
2213 
2214 	if (mld_hash_obj->mld_peer_hash.bins) {
2215 		qdf_mem_free(mld_hash_obj->mld_peer_hash.bins);
2216 		mld_hash_obj->mld_peer_hash.bins = NULL;
2217 		qdf_spinlock_destroy(&mld_hash_obj->mld_peer_hash_lock);
2218 	}
2219 }
2220 
2221 #ifdef WLAN_MLO_MULTI_CHIP
2222 static QDF_STATUS dp_mlo_peer_find_hash_attach_wrapper(struct dp_soc *soc)
2223 {
2224 	/* In the multi-chip MLO case, the peer hash table is created along
2225 	 * with the MLO global object, so skip creating it from SOC attach.
2226 	 */
2227 	return QDF_STATUS_SUCCESS;
2228 }
2229 
2230 static void dp_mlo_peer_find_hash_detach_wrapper(struct dp_soc *soc)
2231 {
2232 }
2233 
2234 void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj)
2235 {
2236 }
2237 
2238 void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj)
2239 {
2240 }
2241 #else
2242 static QDF_STATUS dp_mlo_peer_find_hash_attach_wrapper(struct dp_soc *soc)
2243 {
2244 	dp_mld_peer_hash_obj_t mld_hash_obj;
2245 
2246 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2247 
2248 	if (!mld_hash_obj)
2249 		return QDF_STATUS_E_FAILURE;
2250 
2251 	return dp_mlo_peer_find_hash_attach_be(mld_hash_obj, soc->max_peers);
2252 }
2253 
2254 static void dp_mlo_peer_find_hash_detach_wrapper(struct dp_soc *soc)
2255 {
2256 	dp_mld_peer_hash_obj_t mld_hash_obj;
2257 
2258 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2259 
2260 	if (!mld_hash_obj)
2261 		return;
2262 
2263 	return dp_mlo_peer_find_hash_detach_be(mld_hash_obj);
2264 }
2265 
2266 void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj)
2267 {
2268 	dp_mlo_dev_ctxt_list_attach(mlo_dev_obj);
2269 }
2270 
2271 void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj)
2272 {
2273 	dp_mlo_dev_ctxt_list_detach(mlo_dev_obj);
2274 }
2275 #endif
2276 
2277 #ifdef QCA_ENHANCED_STATS_SUPPORT
2278 static uint8_t
2279 dp_get_hw_link_id_be(struct dp_pdev *pdev)
2280 {
2281 	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
2282 
2283 	return be_pdev->mlo_link_id;
2284 }
2285 #else
2286 static uint8_t
2287 dp_get_hw_link_id_be(struct dp_pdev *pdev)
2288 {
2289 	return 0;
2290 }
2291 #endif /* QCA_ENHANCED_STATS_SUPPORT */
2292 
2293 static struct dp_peer *
2294 dp_mlo_peer_find_hash_find_be(struct dp_soc *soc,
2295 			      uint8_t *peer_mac_addr,
2296 			      int mac_addr_is_aligned,
2297 			      enum dp_mod_id mod_id,
2298 			      uint8_t vdev_id)
2299 {
2300 	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
2301 	uint32_t index;
2302 	struct dp_peer *peer;
2303 	struct dp_vdev *vdev;
2304 	dp_mld_peer_hash_obj_t mld_hash_obj;
2305 
2306 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2307 	if (!mld_hash_obj)
2308 		return NULL;
2309 
2310 	if (!mld_hash_obj->mld_peer_hash.bins)
2311 		return NULL;
2312 
2313 	if (mac_addr_is_aligned) {
2314 		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
2315 	} else {
2316 		qdf_mem_copy(
2317 			&local_mac_addr_aligned.raw[0],
2318 			peer_mac_addr, QDF_MAC_ADDR_SIZE);
2319 		mac_addr = &local_mac_addr_aligned;
2320 	}
2321 
2322 	if (vdev_id != DP_VDEV_ALL) {
2323 		vdev = dp_vdev_get_ref_by_id(soc, vdev_id, mod_id);
2324 		if (!vdev) {
2325 			dp_err("vdev is null");
2326 			return NULL;
2327 		}
2328 	} else {
2329 		vdev = NULL;
2330 	}
2331 
2332 	/* search the MLD peer table; no link peer matched this MAC address */
2333 	index = dp_mlo_peer_find_hash_index(mld_hash_obj, mac_addr);
2334 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
2335 	TAILQ_FOREACH(peer, &mld_hash_obj->mld_peer_hash.bins[index],
2336 		      hash_list_elem) {
2337 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
2338 			if ((vdev_id == DP_VDEV_ALL) || (
2339 				dp_peer_find_mac_addr_cmp(
2340 						&peer->vdev->mld_mac_addr,
2341 						&vdev->mld_mac_addr) == 0)) {
2342 				/* take peer reference before returning */
2343 				if (dp_peer_get_ref(NULL, peer, mod_id) !=
2344 						QDF_STATUS_SUCCESS)
2345 					peer = NULL;
2346 
2347 				if (vdev)
2348 					dp_vdev_unref_delete(soc, vdev, mod_id);
2349 
2350 				qdf_spin_unlock_bh(
2351 					&mld_hash_obj->mld_peer_hash_lock);
2352 				return peer;
2353 			}
2354 		}
2355 	}
2356 
2357 	if (vdev)
2358 		dp_vdev_unref_delete(soc, vdev, mod_id);
2359 
2360 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
2361 
2362 	return NULL; /* failure */
2363 }
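
/*
 * Usage sketch (illustrative): callers reaching this function through the
 * mlo_peer_find_hash_find arch op must drop the reference the lookup takes
 * on success:
 *
 *	peer = dp_mlo_peer_find_hash_find_be(soc, mac, 0, DP_MOD_ID_CDP,
 *					     vdev_id);
 *	if (peer) {
 *		...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */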
2364 
2365 static void
2366 dp_mlo_peer_find_hash_remove_be(struct dp_soc *soc, struct dp_peer *peer)
2367 {
2368 	uint32_t index;
2369 	struct dp_peer *tmppeer = NULL;
2370 	int found = 0;
2371 	dp_mld_peer_hash_obj_t mld_hash_obj;
2372 
2373 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2374 
2375 	if (!mld_hash_obj)
2376 		return;
2377 
2378 	index = dp_mlo_peer_find_hash_index(mld_hash_obj, &peer->mac_addr);
2379 	QDF_ASSERT(!TAILQ_EMPTY(&mld_hash_obj->mld_peer_hash.bins[index]));
2380 
2381 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
2382 	TAILQ_FOREACH(tmppeer, &mld_hash_obj->mld_peer_hash.bins[index],
2383 		      hash_list_elem) {
2384 		if (tmppeer == peer) {
2385 			found = 1;
2386 			break;
2387 		}
2388 	}
2389 	QDF_ASSERT(found);
2390 	TAILQ_REMOVE(&mld_hash_obj->mld_peer_hash.bins[index], peer,
2391 		     hash_list_elem);
2392 
2393 	dp_info("Peer %pK (" QDF_MAC_ADDR_FMT ") removed. (found %u)",
2394 		peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), found);
2395 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2396 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
2398 }
2399 
2400 static void
2401 dp_mlo_peer_find_hash_add_be(struct dp_soc *soc, struct dp_peer *peer)
2402 {
2403 	uint32_t index;
2404 	dp_mld_peer_hash_obj_t mld_hash_obj;
2405 
2406 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2407 
2408 	if (!mld_hash_obj)
2409 		return;
2410 
2411 	index = dp_mlo_peer_find_hash_index(mld_hash_obj, &peer->mac_addr);
2412 
2413 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
2414 
2415 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(NULL, peer,
2416 						DP_MOD_ID_CONFIG))) {
2417 		dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
2418 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2419 		qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
2420 		return;
2421 	}
2422 	TAILQ_INSERT_TAIL(&mld_hash_obj->mld_peer_hash.bins[index], peer,
2423 			  hash_list_elem);
2424 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
2425 
2426 	dp_info("Peer %pK (" QDF_MAC_ADDR_FMT ") added",
2427 		peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2428 }
2429 
2430 void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
2431 {
2432 	uint32_t index;
2433 	struct dp_peer *peer;
2434 	dp_mld_peer_hash_obj_t mld_hash_obj;
2435 
2436 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
2437 
2438 	if (!mld_hash_obj)
2439 		return;
2440 
2441 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
2442 	for (index = 0; index <= mld_hash_obj->mld_peer_hash.mask; index++) {
2443 		TAILQ_FOREACH(peer, &mld_hash_obj->mld_peer_hash.bins[index],
2444 			      hash_list_elem) {
2445 			dp_print_peer_ast_entries(soc, peer, NULL);
2446 		}
2447 	}
2448 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
2449 }
2450 
2451 #endif
2452 
2453 #if defined(DP_UMAC_HW_HARD_RESET) && defined(DP_UMAC_HW_RESET_SUPPORT)
2454 static void dp_reconfig_tx_vdev_mcast_ctrl_be(struct dp_soc *soc,
2455 					      struct dp_vdev *vdev)
2456 {
2457 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2458 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2459 	hal_soc_handle_t hal_soc = soc->hal_soc;
2460 	uint8_t vdev_id = vdev->vdev_id;
2461 
2462 	if (vdev->opmode == wlan_op_mode_sta) {
2463 		if (vdev->pdev->isolation)
2464 			hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
2465 						HAL_TX_MCAST_CTRL_FW_EXCEPTION);
2466 		else
2467 			hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
2468 						HAL_TX_MCAST_CTRL_MEC_NOTIFY);
2469 	} else if (vdev->opmode == wlan_op_mode_ap) {
2470 		hal_tx_mcast_mlo_reinject_routing_set(
2471 					hal_soc,
2472 					HAL_TX_MCAST_MLO_REINJECT_TQM_NOTIFY);
2473 		if (vdev->mlo_vdev) {
2474 			hal_tx_vdev_mcast_ctrl_set(
2475 						hal_soc,
2476 						vdev_id,
2477 						HAL_TX_MCAST_CTRL_NO_SPECIAL);
2478 		} else {
2479 			hal_tx_vdev_mcast_ctrl_set(hal_soc,
2480 						   vdev_id,
2481 						   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
2482 		}
2483 	}
2484 }
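
/*
 * Summary of the reconfig policy above (restating the code for clarity):
 *
 *	STA, pdev isolation	-> HAL_TX_MCAST_CTRL_FW_EXCEPTION
 *	STA, no isolation	-> HAL_TX_MCAST_CTRL_MEC_NOTIFY
 *	AP, MLO vdev		-> HAL_TX_MCAST_CTRL_NO_SPECIAL
 *	AP, non-MLO vdev	-> HAL_TX_MCAST_CTRL_FW_EXCEPTION
 *
 * The AP path also re-applies the MLO reinject routing (TQM_NOTIFY).
 */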
2485 
2486 static void dp_bank_reconfig_be(struct dp_soc *soc, struct dp_vdev *vdev)
2487 {
2488 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2489 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2490 	union hal_tx_bank_config *bank_config;
2491 
2492 	if (!be_vdev || be_vdev->bank_id == DP_BE_INVALID_BANK_ID)
2493 		return;
2494 
2495 	bank_config = &be_soc->bank_profiles[be_vdev->bank_id].bank_config;
2496 
2497 	hal_tx_populate_bank_register(be_soc->soc.hal_soc, bank_config,
2498 				      be_vdev->bank_id);
2499 }
2500 
2501 #endif
2502 
2503 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2504 	defined(WLAN_MCAST_MLO)
2505 static void dp_mlo_mcast_reset_pri_mcast(struct dp_vdev_be *be_vdev,
2506 					 struct dp_vdev *ptnr_vdev,
2507 					 void *arg)
2508 {
2509 	struct dp_vdev_be *be_ptnr_vdev =
2510 				dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
2511 
2512 	be_ptnr_vdev->mcast_primary = false;
2513 }
2514 
2515 #if defined(CONFIG_MLO_SINGLE_DEV)
2516 static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
2517 					struct dp_vdev *vdev,
2518 					cdp_config_param_type val)
2519 {
2520 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2521 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(
2522 						be_vdev->vdev.pdev->soc);
2523 
2524 	be_vdev->mcast_primary = val.cdp_vdev_param_mcast_vdev;
2525 	vdev->mlo_vdev = 1;
2526 
2527 	if (be_vdev->mcast_primary) {
2528 		struct cdp_txrx_peer_params_update params = {0};
2529 
2530 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
2531 				      dp_mlo_mcast_reset_pri_mcast,
2532 				      (void *)&be_vdev->mcast_primary,
2533 				      DP_MOD_ID_TX_MCAST,
2534 				      DP_LINK_VDEV_ITER);
2535 
2536 		params.chip_id = be_soc->mlo_chip_id;
2537 		params.pdev_id = be_vdev->vdev.pdev->pdev_id;
2538 		params.vdev_id = vdev->vdev_id;
2539 		dp_wdi_event_handler(
2540 				WDI_EVENT_MCAST_PRIMARY_UPDATE,
2541 				be_vdev->vdev.pdev->soc,
2542 				(void *)&params, CDP_INVALID_PEER,
2543 				WDI_NO_VAL, params.pdev_id);
2544 	}
2545 }
2546 
2547 static
2548 void dp_get_vdev_stats_for_unmap_peer_be(struct dp_vdev *vdev,
2549 					 struct dp_peer *peer,
2550 					 struct cdp_vdev_stats **vdev_stats)
2551 {
2552 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2553 
2554 	if (!IS_DP_LEGACY_PEER(peer))
2555 		*vdev_stats = &be_vdev->mlo_stats;
2556 }
2557 #else
2558 static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
2559 					struct dp_vdev *vdev,
2560 					cdp_config_param_type val)
2561 {
2562 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2563 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(
2564 						be_vdev->vdev.pdev->soc);
2565 
2566 	be_vdev->mcast_primary = val.cdp_vdev_param_mcast_vdev;
2567 	vdev->mlo_vdev = 1;
2568 	hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
2569 				   vdev->vdev_id,
2570 				   HAL_TX_MCAST_CTRL_NO_SPECIAL);
2571 
2572 	if (be_vdev->mcast_primary) {
2573 		struct cdp_txrx_peer_params_update params = {0};
2574 
2575 		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
2576 				      dp_mlo_mcast_reset_pri_mcast,
2577 				      (void *)&be_vdev->mcast_primary,
2578 				      DP_MOD_ID_TX_MCAST,
2579 				      DP_LINK_VDEV_ITER);
2580 
2581 		params.chip_id = be_soc->mlo_chip_id;
2582 		params.pdev_id = vdev->pdev->pdev_id;
2583 		params.vdev_id = vdev->vdev_id;
2584 		dp_wdi_event_handler(
2585 				WDI_EVENT_MCAST_PRIMARY_UPDATE,
2586 				vdev->pdev->soc,
2587 				(void *)&params, CDP_INVALID_PEER,
2588 				WDI_NO_VAL, params.pdev_id);
2589 	}
2590 }
2591 #endif
2592 
2593 static void dp_txrx_reset_mlo_mcast_primary_vdev_param_be(
2594 					struct dp_vdev *vdev,
2595 					cdp_config_param_type val)
2596 {
2597 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2598 
2599 	be_vdev->mcast_primary = false;
2600 	vdev->mlo_vdev = 0;
2601 	hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
2602 				   vdev->vdev_id,
2603 				   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
2604 }
2605 
2606 /**
2607  * dp_txrx_get_vdev_mcast_param_be() - Target specific ops for getting vdev
2608  *                                      params related to multicast
2609  * @soc: DP soc handle
2610  * @vdev: pointer to vdev structure
2611  * @val: buffer address
2612  *
2613  * Return: QDF_STATUS
2614  */
2615 static
2616 QDF_STATUS dp_txrx_get_vdev_mcast_param_be(struct dp_soc *soc,
2617 					   struct dp_vdev *vdev,
2618 					   cdp_config_param_type *val)
2619 {
2620 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2621 
2622 	if (be_vdev->mcast_primary)
2623 		val->cdp_vdev_param_mcast_vdev = true;
2624 	else
2625 		val->cdp_vdev_param_mcast_vdev = false;
2626 
2627 	return QDF_STATUS_SUCCESS;
2628 }
2629 #else
2630 static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
2631 					struct dp_vdev *vdev,
2632 					cdp_config_param_type val)
2633 {
2634 }
2635 
2636 static void dp_txrx_reset_mlo_mcast_primary_vdev_param_be(
2637 					struct dp_vdev *vdev,
2638 					cdp_config_param_type val)
2639 {
2640 }
2641 
2642 static
2643 QDF_STATUS dp_txrx_get_vdev_mcast_param_be(struct dp_soc *soc,
2644 					   struct dp_vdev *vdev,
2645 					   cdp_config_param_type *val)
2646 {
2647 	return QDF_STATUS_SUCCESS;
2648 }
2649 
2650 static
2651 void dp_get_vdev_stats_for_unmap_peer_be(struct dp_vdev *vdev,
2652 					 struct dp_peer *peer,
2653 					 struct cdp_vdev_stats **vdev_stats)
2654 {
2655 }
2656 #endif
2657 
2658 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
2659 static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
2660 				      uint8_t tx_ring_id,
2661 				      uint8_t bm_id)
2662 {
2663 	hal_tx_config_rbm_mapping_be(soc->hal_soc,
2664 				     soc->tcl_data_ring[tx_ring_id].hal_srng,
2665 				     bm_id);
2666 }
2667 #else
2668 static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
2669 				      uint8_t tx_ring_id,
2670 				      uint8_t bm_id)
2671 {
2672 }
2673 #endif
2674 
2675 /**
2676  * dp_txrx_set_vdev_param_be() - Target specific ops while setting vdev params
2677  * @soc: DP soc handle
2678  * @vdev: pointer to vdev structure
2679  * @param: parameter type to get value
2680  * @val: value
2681  *
2682  * Return: QDF_STATUS
2683  */
2684 static
2685 QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
2686 				     struct dp_vdev *vdev,
2687 				     enum cdp_vdev_param_type param,
2688 				     cdp_config_param_type val)
2689 {
2690 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2691 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2692 
2693 	switch (param) {
2694 	case CDP_TX_ENCAP_TYPE:
2695 	case CDP_UPDATE_DSCP_TO_TID_MAP:
2696 	case CDP_UPDATE_TDLS_FLAGS:
2697 		dp_tx_update_bank_profile(be_soc, be_vdev);
2698 		break;
2699 	case CDP_ENABLE_CIPHER:
2700 		if (vdev->tx_encap_type == htt_cmn_pkt_type_raw)
2701 			dp_tx_update_bank_profile(be_soc, be_vdev);
2702 		break;
2703 	case CDP_SET_MCAST_VDEV:
2704 		dp_txrx_set_mlo_mcast_primary_vdev_param_be(vdev, val);
2705 		break;
2706 	case CDP_RESET_MLO_MCAST_VDEV:
2707 		dp_txrx_reset_mlo_mcast_primary_vdev_param_be(vdev, val);
2708 		break;
2709 	default:
2710 		dp_warn("invalid param %d", param);
2711 		break;
2712 	}
2713 
2714 	return QDF_STATUS_SUCCESS;
2715 }
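
/*
 * Usage sketch (illustrative): marking a vdev as the MLO mcast primary
 * through the dispatcher above. cdp_vdev_param_mcast_vdev is the union
 * member consumed by CDP_SET_MCAST_VDEV:
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_vdev_param_mcast_vdev = true;
 *	dp_txrx_set_vdev_param_be(soc, vdev, CDP_SET_MCAST_VDEV, val);
 */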
2716 
2717 #ifdef WLAN_FEATURE_11BE_MLO
2718 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2719 static inline void
2720 dp_soc_max_peer_id_set(struct dp_soc *soc)
2721 {
2722 	soc->peer_id_shift = dp_log2_ceil(soc->max_peers);
2723 	soc->peer_id_mask = (1 << soc->peer_id_shift) - 1;
2724 	/*
2725 	 * Double the peers since we use ML indication bit
2726 	 * alongwith peer_id to find peers.
2727 	 * along with peer_id to find peers.
2728 	soc->max_peer_id = 1 << (soc->peer_id_shift + 1);
2729 }
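
/*
 * Worked example (illustrative): with max_peers = 1024,
 * peer_id_shift = dp_log2_ceil(1024) = 10 and peer_id_mask = 0x3ff, so
 * max_peer_id = 1 << 11 = 2048; bit 10 is left free to carry the ML
 * indication alongside the 10-bit peer id.
 */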
2730 #else
2731 static inline void
2732 dp_soc_max_peer_id_set(struct dp_soc *soc)
2733 {
2734 	soc->max_peer_id =
2735 		(1 << (HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S + 1)) - 1;
2736 }
2737 #endif /* DP_USE_REDUCED_PEER_ID_FIELD_WIDTH */
2738 #else
2739 static inline void
2740 dp_soc_max_peer_id_set(struct dp_soc *soc)
2741 {
2742 	soc->max_peer_id = soc->max_peers;
2743 }
2744 #endif /* WLAN_FEATURE_11BE_MLO */
2745 
2746 static void dp_peer_map_detach_be(struct dp_soc *soc)
2747 {
2748 	if (soc->host_ast_db_enable)
2749 		dp_peer_ast_hash_detach(soc);
2750 }
2751 
2752 static QDF_STATUS dp_peer_map_attach_be(struct dp_soc *soc)
2753 {
2754 	QDF_STATUS status;
2755 
2756 	if (soc->host_ast_db_enable) {
2757 		status = dp_peer_ast_hash_attach(soc);
2758 		if (QDF_IS_STATUS_ERROR(status))
2759 			return status;
2760 	}
2761 
2762 	dp_soc_max_peer_id_set(soc);
2763 
2764 	return QDF_STATUS_SUCCESS;
2765 }
2766 
2767 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
2768 
2769 void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj)
2770 {
2771 	TAILQ_INIT(&mlo_dev_obj->mlo_dev_list);
2772 	qdf_spinlock_create(&mlo_dev_obj->mlo_dev_list_lock);
2773 }
2774 
2775 void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj)
2776 {
2777 	struct dp_mlo_dev_ctxt *mld_ctxt = NULL;
2778 	struct dp_mlo_dev_ctxt *tmp_mld_ctxt = NULL;
2779 
2780 	if (!TAILQ_EMPTY(&mlo_dev_obj->mlo_dev_list)) {
2781 		dp_alert("DP MLO dev list is not empty");
2782 		qdf_spin_lock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2783 		TAILQ_FOREACH_SAFE(mld_ctxt, &mlo_dev_obj->mlo_dev_list,
2784 				   ml_dev_list_elem, tmp_mld_ctxt) {
2785 			if (mld_ctxt) {
2786 				dp_alert("MLD MAC " QDF_MAC_ADDR_FMT " ",
2787 					 QDF_MAC_ADDR_REF(
2788 						mld_ctxt->mld_mac_addr.raw));
2789 				qdf_mem_free(mld_ctxt);
2790 			}
2791 		}
2792 		qdf_spin_unlock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2793 	}
2794 
2795 	qdf_spinlock_destroy(&mlo_dev_obj->mlo_dev_list_lock);
2796 }
2797 
2798 void dp_mlo_dev_ctxt_unref_delete(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
2799 				  enum dp_mod_id mod_id)
2800 {
2801 	QDF_ASSERT(qdf_atomic_dec_return(&mlo_dev_ctxt->mod_refs[mod_id]) >= 0);
2802 
2803 	/* Return if this is not the last reference */
2804 	if (!qdf_atomic_dec_and_test(&mlo_dev_ctxt->ref_cnt))
2805 		return;
2806 
2807 	QDF_ASSERT(mlo_dev_ctxt->ref_delete_pending);
2808 	qdf_spinlock_destroy(&mlo_dev_ctxt->vdev_list_lock);
2809 	qdf_mem_free(mlo_dev_ctxt);
2810 }
2811 
2812 QDF_STATUS dp_mlo_dev_get_ref(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
2813 			      enum dp_mod_id mod_id)
2814 {
2815 	if (!qdf_atomic_inc_return(&mlo_dev_ctxt->ref_cnt))
2816 		return QDF_STATUS_E_INVAL;
2817 
2818 	qdf_atomic_inc(&mlo_dev_ctxt->mod_refs[mod_id]);
2819 
2820 	return QDF_STATUS_SUCCESS;
2821 }
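
/*
 * Usage sketch (illustrative): every successful dp_mlo_dev_get_ref() must
 * be balanced by dp_mlo_dev_ctxt_unref_delete() with the same mod_id:
 *
 *	if (dp_mlo_dev_get_ref(ctxt, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *		...
 *		dp_mlo_dev_ctxt_unref_delete(ctxt, DP_MOD_ID_CDP);
 *	}
 */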
2822 
2823 struct dp_mlo_dev_ctxt *
2824 dp_get_mlo_dev_ctx_by_mld_mac_addr(struct dp_soc_be *be_soc,
2825 				   uint8_t *mldaddr,
2826 				   enum dp_mod_id mod_id)
2827 {
2828 	struct dp_mlo_dev_ctxt *mld_cur = NULL;
2829 	struct dp_mlo_dev_ctxt *tmp_mld_cur = NULL;
2830 	dp_mlo_dev_obj_t mlo_dev_obj = dp_get_mlo_dev_list_obj(be_soc);
2831 
2832 	if (!mlo_dev_obj) {
2833 		dp_err("DP Global MLO Context is NULL");
2834 		return NULL;
2835 	}
2836 
2837 	/*
2838 	 * Iterate through the ML dev list until mldaddr matches an
2839 	 * entry in the list.
2840 	 */
2841 	qdf_spin_lock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2842 	TAILQ_FOREACH_SAFE(mld_cur, &mlo_dev_obj->mlo_dev_list,
2843 			   ml_dev_list_elem, tmp_mld_cur) {
2844 		if (!qdf_mem_cmp(&mld_cur->mld_mac_addr.raw, mldaddr,
2845 				 QDF_MAC_ADDR_SIZE)) {
2846 			if (dp_mlo_dev_get_ref(mld_cur, mod_id)
2847 			    == QDF_STATUS_SUCCESS) {
2848 				qdf_spin_unlock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2849 				return mld_cur;
2850 			}
2851 		}
2852 	}
2853 	qdf_spin_unlock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2854 	return NULL;
2855 }
2856 
2857 /**
2858  * dp_mlo_dev_ctxt_create() - Allocate DP MLO dev context
2859  * @soc_hdl: SOC handle
2860  * @mld_mac_addr: MLD MAC address
2861  *
2862  * Return: QDF_STATUS
2863  */
2864 static inline
2865 QDF_STATUS dp_mlo_dev_ctxt_create(struct cdp_soc_t *soc_hdl,
2866 				  uint8_t *mld_mac_addr)
2867 {
2868 	struct dp_mlo_dev_ctxt *mlo_dev_ctxt = NULL;
2869 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2870 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2871 	dp_mlo_dev_obj_t mlo_dev_obj = dp_get_mlo_dev_list_obj(be_soc);
2872 
2873 	if (!mlo_dev_obj) {
2874 		dp_err("DP Global MLO Context is NULL");
2875 		return QDF_STATUS_E_FAILURE;
2876 	}
2877 
2878 	/* check if MLO dev ctx already available */
2879 	mlo_dev_ctxt = dp_get_mlo_dev_ctx_by_mld_mac_addr(be_soc,
2880 							  mld_mac_addr,
2881 							  DP_MOD_ID_MLO_DEV);
2882 	if (mlo_dev_ctxt) {
2883 		dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_MLO_DEV);
2884 		/* assert if we get two create requests for the same MLD MAC */
2885 		qdf_assert_always(0);
2886 	}
2887 
2888 	/* Allocate MLO dev ctx */
2889 	mlo_dev_ctxt = qdf_mem_malloc(sizeof(struct dp_mlo_dev_ctxt));
2890 
2891 	if (!mlo_dev_ctxt) {
2892 		dp_err("Failed to allocate DP MLO Dev Context");
2893 		return QDF_STATUS_E_NOMEM;
2894 	}
2895 
2896 	qdf_copy_macaddr((struct qdf_mac_addr *)&mlo_dev_ctxt->mld_mac_addr.raw[0],
2897 			 (struct qdf_mac_addr *)mld_mac_addr);
2898 
2899 	qdf_mem_set(mlo_dev_ctxt->vdev_list,
2900 		    WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC,
2901 		    CDP_INVALID_VDEV_ID);
2902 	qdf_mem_set(mlo_dev_ctxt->bridge_vdev,
2903 		    WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC,
2904 		    CDP_INVALID_VDEV_ID);
2905 	mlo_dev_ctxt->seq_num = 0;
2906 
2907 	/* Add mlo_dev_ctxt to the global DP MLO list */
2908 	qdf_spin_lock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2909 	TAILQ_INSERT_TAIL(&mlo_dev_obj->mlo_dev_list,
2910 			  mlo_dev_ctxt, ml_dev_list_elem);
2911 	qdf_spin_unlock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2912 
2913 	/* Ref for MLO ctxt saved in global list */
2914 	dp_mlo_dev_get_ref(mlo_dev_ctxt, DP_MOD_ID_CONFIG);
2915 
2916 	mlo_dev_ctxt->ref_delete_pending = 0;
2917 	qdf_spinlock_create(&mlo_dev_ctxt->vdev_list_lock);
2918 	return QDF_STATUS_SUCCESS;
2919 }
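
/*
 * Lifecycle sketch (illustrative): a create for an MLD MAC is expected to
 * be paired with a destroy once all vdevs have detached; a second create
 * for the same MLD MAC asserts:
 *
 *	dp_mlo_dev_ctxt_create(soc_hdl, mld_mac);	// global-list ref
 *	dp_mlo_dev_ctxt_vdev_attach(soc_hdl, vdev_id, mld_mac);
 *	...
 *	dp_mlo_dev_ctxt_vdev_detach(soc_hdl, vdev_id, mld_mac);
 *	dp_mlo_dev_ctxt_destroy(soc_hdl, mld_mac);	// drops last ref
 */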
2920 
2921 /**
2922  * dp_mlo_dev_ctxt_destroy() - Destroy DP MLO dev context
2923  * @soc_hdl: SOC handle
2924  * @mld_mac_addr: MLD MAC address
2925  *
2926  * Return: QDF_STATUS
2927  */
2928 static inline
2929 QDF_STATUS dp_mlo_dev_ctxt_destroy(struct cdp_soc_t *soc_hdl,
2930 				   uint8_t *mld_mac_addr)
2931 {
2932 	struct dp_mlo_dev_ctxt *mlo_dev_ctxt = NULL;
2933 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2934 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2935 	dp_mlo_dev_obj_t mlo_dev_obj = dp_get_mlo_dev_list_obj(be_soc);
2936 
2937 	if (!mlo_dev_obj) {
2938 		dp_err("DP Global MLO Context is NULL");
2939 		return QDF_STATUS_E_INVAL;
2940 	}
2941 
2942 	/* GET mlo_dev_ctxt from the global list */
2943 	mlo_dev_ctxt = dp_get_mlo_dev_ctx_by_mld_mac_addr(be_soc,
2944 							  mld_mac_addr,
2945 							  DP_MOD_ID_MLO_DEV);
2946 	if (!mlo_dev_ctxt) {
2947 		dp_err("Failed to get DP MLO Dev Context by MLD mac addr");
2948 		return QDF_STATUS_E_INVAL;
2949 	}
2950 
2951 	if (mlo_dev_ctxt->vdev_count)
2952 		dp_alert("deleting MLO dev ctxt with non-zero vdev count");
2953 
2954 	qdf_spin_lock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2955 	TAILQ_REMOVE(&mlo_dev_obj->mlo_dev_list,
2956 		     mlo_dev_ctxt, ml_dev_list_elem);
2957 	qdf_spin_unlock_bh(&mlo_dev_obj->mlo_dev_list_lock);
2958 
2959 	/* release the ref taken when the ctxt was added to the global list */
2960 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_CONFIG);
2961 
2962 	mlo_dev_ctxt->ref_delete_pending = 1;
2963 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_MLO_DEV);
2964 	return QDF_STATUS_SUCCESS;
2965 }
2966 
2967 /**
2968  * dp_mlo_dev_ctxt_vdev_attach() - Attach vdev to DP MLO dev context
2969  * @soc_hdl: SOC handle
2970  * @vdev_id: vdev id for the vdev to be attached
2971  * @mld_mac_addr: MLD MAC address
2972  *
2973  * Return: QDF_STATUS
2974  */
2975 static inline
2976 QDF_STATUS dp_mlo_dev_ctxt_vdev_attach(struct cdp_soc_t *soc_hdl,
2977 				       uint8_t vdev_id,
2978 				       uint8_t *mld_mac_addr)
2979 {
2980 	struct dp_mlo_dev_ctxt *mlo_dev_ctxt = NULL;
2981 	struct dp_vdev *vdev = NULL;
2982 	struct dp_vdev_be *be_vdev = NULL;
2983 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2984 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2985 
2986 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
2987 	if (!vdev)
2988 		return QDF_STATUS_E_FAILURE;
2989 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
2990 
2991 	/* GET mlo_dev_ctxt from the global list */
2992 	mlo_dev_ctxt = dp_get_mlo_dev_ctx_by_mld_mac_addr(be_soc,
2993 							  mld_mac_addr,
2994 							  DP_MOD_ID_MLO_DEV);
2995 	if (!mlo_dev_ctxt) {
2996 		dp_err("Failed to get MLO ctxt for " QDF_MAC_ADDR_FMT "",
2997 		       QDF_MAC_ADDR_REF(mld_mac_addr));
2998 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2999 		return QDF_STATUS_E_INVAL;
3000 	}
3001 
3002 	dp_attach_vdev_list_in_mlo_dev_ctxt(be_soc, vdev, mlo_dev_ctxt);
3003 	be_vdev->mlo_dev_ctxt = mlo_dev_ctxt;
3004 
3005 	/* ref for holding MLO ctxt in be_vdev */
3006 	dp_mlo_dev_get_ref(mlo_dev_ctxt, DP_MOD_ID_CHILD);
3007 	/* Save vdev stats in MLO dev ctx */
3008 	dp_update_mlo_ctxt_stats(&mlo_dev_ctxt->stats, &vdev->stats);
3009 
3010 	/* reset vdev stats to zero */
3011 	qdf_mem_set(&vdev->stats, sizeof(struct cdp_vdev_stats), 0);
3012 
3013 	/* unref for mlo ctxt taken at the start of this function */
3014 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_MLO_DEV);
3015 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3016 
3017 	return QDF_STATUS_SUCCESS;
3018 }
3019 
3020 /**
3021  * dp_mlo_dev_ctxt_vdev_detach() - Detach vdev from DP MLO dev context
3022  * @soc_hdl: SOC handle
3023  * @vdev_id: vdev id for the vdev to be attached
3024  * @mld_mac_addr: MLD MAC address
3025  *
3026  * Return: QDF_STATUS
3027  */
3028 static inline
3029 QDF_STATUS dp_mlo_dev_ctxt_vdev_detach(struct cdp_soc_t *soc_hdl,
3030 				       uint8_t vdev_id,
3031 				       uint8_t *mld_mac_addr)
3032 {
3033 	struct dp_vdev *vdev = NULL;
3034 	struct dp_vdev_be *be_vdev = NULL;
3035 	struct dp_mlo_dev_ctxt *mlo_dev_ctxt = NULL;
3036 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3037 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3038 
3039 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
3040 	if (!vdev)
3041 		return QDF_STATUS_E_FAILURE;
3042 
3043 	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
3044 
3045 	/* GET mlo_dev_ctxt from the global list */
3046 	mlo_dev_ctxt = dp_get_mlo_dev_ctx_by_mld_mac_addr(be_soc,
3047 							  mld_mac_addr,
3048 							  DP_MOD_ID_MLO_DEV);
3049 
3050 	if (!mlo_dev_ctxt) {
3051 		dp_err("Failed to get DP MLO Dev Context by MLD mac addr");
3052 		if (!be_vdev->mlo_dev_ctxt) {
3053 			dp_err("Failed to get DP MLO Dev Context from vdev");
3054 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3055 			return QDF_STATUS_E_INVAL;
3056 		}
3057 		mlo_dev_ctxt = be_vdev->mlo_dev_ctxt;
3058 	}
3059 
3060 	dp_detach_vdev_list_in_mlo_dev_ctxt(be_soc, vdev, mlo_dev_ctxt);
3061 	be_vdev->mlo_dev_ctxt = NULL;
3062 
3063 	/* unref for mlo ctxt removed from be_vdev */
3064 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_CHILD);
3065 
3066 	/* unref for mlo ctxt taken at the start of this function */
3067 	dp_mlo_dev_ctxt_unref_delete(mlo_dev_ctxt, DP_MOD_ID_MLO_DEV);
3068 
3069 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3070 	return QDF_STATUS_SUCCESS;
3071 }
3072 #else
3073 void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj)
3074 {
3075 }
3076 
3077 void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj)
3078 {
3079 }
3080 
3081 static inline
3082 QDF_STATUS dp_mlo_dev_ctxt_create(struct cdp_soc_t *soc_hdl,
3083 				  uint8_t *mld_mac_addr)
3084 {
3085 	return QDF_STATUS_SUCCESS;
3086 }
3087 
3088 static inline
3089 QDF_STATUS dp_mlo_dev_ctxt_destroy(struct cdp_soc_t *soc_hdl,
3090 				   uint8_t *mld_mac_addr)
3091 {
3092 	return QDF_STATUS_SUCCESS;
3093 }
3094 
3095 static inline
3096 QDF_STATUS dp_mlo_dev_ctxt_vdev_attach(struct cdp_soc_t *soc_hdl,
3097 				       uint8_t vdev_id,
3098 				       uint8_t *mld_mac_addr)
3099 {
3100 	return QDF_STATUS_SUCCESS;
3101 }
3102 
3103 static inline
3104 QDF_STATUS dp_mlo_dev_ctxt_vdev_detach(struct cdp_soc_t *soc_hdl,
3105 				       uint8_t vdev_id,
3106 				       uint8_t *mld_mac_addr)
3107 {
3108 	return QDF_STATUS_SUCCESS;
3109 }
3110 #endif /* WLAN_DP_MLO_DEV_CTX */
3111 
3112 #ifdef WLAN_FEATURE_11BE_MLO
3113 #ifdef WLAN_MCAST_MLO
3114 static inline void
3115 dp_initialize_arch_ops_be_mcast_mlo(struct dp_arch_ops *arch_ops)
3116 {
3117 	arch_ops->dp_tx_mcast_handler = dp_tx_mlo_mcast_handler_be;
3118 	arch_ops->dp_rx_mcast_handler = dp_rx_mlo_igmp_handler;
3119 	arch_ops->dp_tx_is_mcast_primary = dp_tx_mlo_is_mcast_primary_be;
3120 }
3121 #else /* WLAN_MCAST_MLO */
3122 static inline void
3123 dp_initialize_arch_ops_be_mcast_mlo(struct dp_arch_ops *arch_ops)
3124 {
3125 }
3126 #endif /* WLAN_MCAST_MLO */
3127 
3128 #ifdef WLAN_MLO_MULTI_CHIP
3129 static inline void
3130 dp_initialize_arch_ops_be_mlo_multi_chip(struct dp_arch_ops *arch_ops)
3131 {
3132 	arch_ops->dp_partner_chips_map = dp_mlo_partner_chips_map;
3133 	arch_ops->dp_partner_chips_unmap = dp_mlo_partner_chips_unmap;
3134 	arch_ops->dp_soc_get_by_idle_bm_id = dp_soc_get_by_idle_bm_id;
3135 }
3136 #else
3137 static inline void
3138 dp_initialize_arch_ops_be_mlo_multi_chip(struct dp_arch_ops *arch_ops)
3139 {
3140 }
3141 #endif
3142 
3143 static inline void
3144 dp_initialize_arch_ops_be_mlo(struct dp_arch_ops *arch_ops)
3145 {
3146 	dp_initialize_arch_ops_be_mcast_mlo(arch_ops);
3147 	dp_initialize_arch_ops_be_mlo_multi_chip(arch_ops);
3148 	arch_ops->mlo_peer_find_hash_detach =
3149 	dp_mlo_peer_find_hash_detach_wrapper;
3150 	arch_ops->mlo_peer_find_hash_attach =
3151 	dp_mlo_peer_find_hash_attach_wrapper;
3152 	arch_ops->mlo_peer_find_hash_add = dp_mlo_peer_find_hash_add_be;
3153 	arch_ops->mlo_peer_find_hash_remove = dp_mlo_peer_find_hash_remove_be;
3154 	arch_ops->mlo_peer_find_hash_find = dp_mlo_peer_find_hash_find_be;
3155 	arch_ops->get_hw_link_id = dp_get_hw_link_id_be;
3156 }
3157 #else /* WLAN_FEATURE_11BE_MLO */
3158 static inline void
3159 dp_initialize_arch_ops_be_mlo(struct dp_arch_ops *arch_ops)
3160 {
3161 }
3162 #endif /* WLAN_FEATURE_11BE_MLO */
3163 
3164 static struct cdp_cmn_mlo_ops dp_cmn_mlo_ops = {
3165 	.mlo_dev_ctxt_create = dp_mlo_dev_ctxt_create,
3166 	.mlo_dev_ctxt_attach = dp_mlo_dev_ctxt_vdev_attach,
3167 	.mlo_dev_ctxt_detach = dp_mlo_dev_ctxt_vdev_detach,
3168 	.mlo_dev_ctxt_destroy = dp_mlo_dev_ctxt_destroy,
3169 };
3170 
3171 void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc)
3172 {
3173 	soc->cdp_soc.ops->cmn_mlo_ops = &dp_cmn_mlo_ops;
3174 }
3175 
3176 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3177 #define DP_LMAC_PEER_ID_MSB_LEGACY 2
3178 #define DP_LMAC_PEER_ID_MSB_MLO 3
3179 
3180 static void dp_peer_get_reo_hash_be(struct dp_vdev *vdev,
3181 				    struct cdp_peer_setup_info *setup_info,
3182 				    enum cdp_host_reo_dest_ring *reo_dest,
3183 				    bool *hash_based,
3184 				    uint8_t *lmac_peer_id_msb)
3185 {
3186 	struct dp_soc *soc = vdev->pdev->soc;
3187 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3188 
3189 	if (!be_soc->mlo_enabled)
3190 		return dp_vdev_get_default_reo_hash(vdev, reo_dest,
3191 						    hash_based);
3192 
3193 	*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3194 	*reo_dest = vdev->pdev->reo_dest;
3195 
3196 	/* Not an ML link peer; use the non-MLO value */
3197 	if (!setup_info) {
3198 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_LEGACY;
3199 		return;
3200 	}
3201 
3202 	/* For an STA ML VAP the number of links is not known at this
3203 	 * point, so always use the MLO value.
3204 	 */
3205 	if (vdev->opmode == wlan_op_mode_sta) {
3206 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_MLO;
3207 		return;
3208 	}
3209 
3210 	/* For an AP ML VAP consider the peer as ML only if it associates
3211 	 * with multiple links.
3212 	 */
3213 	if (setup_info->num_links == 1) {
3214 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_LEGACY;
3215 		return;
3216 	}
3217 
3218 	*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_MLO;
3219 }
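
/*
 * Decision summary (restating the code above): lmac_peer_id_msb selection
 * when be_soc->mlo_enabled is set:
 *
 *	no setup_info (not an ML link peer)	-> DP_LMAC_PEER_ID_MSB_LEGACY
 *	STA ML VAP (link count unknown)		-> DP_LMAC_PEER_ID_MSB_MLO
 *	AP ML VAP, num_links == 1		-> DP_LMAC_PEER_ID_MSB_LEGACY
 *	AP ML VAP, num_links > 1		-> DP_LMAC_PEER_ID_MSB_MLO
 */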
3220 
3221 static bool dp_reo_remap_config_be(struct dp_soc *soc,
3222 				   uint32_t *remap0,
3223 				   uint32_t *remap1,
3224 				   uint32_t *remap2)
3225 {
3226 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3227 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3228 	uint32_t reo_mlo_config =
3229 		wlan_cfg_mlo_rx_ring_map_get(soc->wlan_cfg_ctx);
3230 
3231 	if (!be_soc->mlo_enabled)
3232 		return dp_reo_remap_config(soc, remap0, remap1, remap2);
3233 
3234 	*remap0 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_mlo_config);
3235 	*remap1 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_config);
3236 	*remap2 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_mlo_config);
3237 
3238 	return true;
3239 }
3240 #else
3241 static void dp_peer_get_reo_hash_be(struct dp_vdev *vdev,
3242 				    struct cdp_peer_setup_info *setup_info,
3243 				    enum cdp_host_reo_dest_ring *reo_dest,
3244 				    bool *hash_based,
3245 				    uint8_t *lmac_peer_id_msb)
3246 {
3247 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
3248 }
3249 
3250 static bool dp_reo_remap_config_be(struct dp_soc *soc,
3251 				   uint32_t *remap0,
3252 				   uint32_t *remap1,
3253 				   uint32_t *remap2)
3254 {
3255 	return dp_reo_remap_config(soc, remap0, remap1, remap2);
3256 }
3257 #endif
3258 
3259 #ifdef CONFIG_MLO_SINGLE_DEV
3260 static inline
3261 void dp_initialize_arch_ops_be_single_dev(struct dp_arch_ops *arch_ops)
3262 {
3263 	arch_ops->dp_tx_mlo_mcast_send = dp_tx_mlo_mcast_send_be;
3264 }
3265 #else
3266 static inline
3267 void dp_initialize_arch_ops_be_single_dev(struct dp_arch_ops *arch_ops)
3268 {
3269 }
3270 #endif
3271 
3272 #ifdef IPA_OFFLOAD
3273 static int8_t dp_ipa_get_bank_id_be(struct dp_soc *soc)
3274 {
3275 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3276 
3277 	return be_soc->ipa_bank_id;
3278 }
3279 
3280 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
3281 static void dp_ipa_get_wdi_version_be(uint8_t *wdi_ver)
3282 {
3283 	*wdi_ver = IPA_WDI_4;
3284 }
3285 #else
3286 static inline void dp_ipa_get_wdi_version_be(uint8_t *wdi_ver)
3287 {
3288 }
3289 #endif
3290 
3291 static inline void dp_initialize_arch_ops_be_ipa(struct dp_arch_ops *arch_ops)
3292 {
3293 	arch_ops->ipa_get_bank_id = dp_ipa_get_bank_id_be;
3294 	arch_ops->ipa_get_wdi_ver = dp_ipa_get_wdi_version_be;
3295 }
3296 #else /* !IPA_OFFLOAD */
3297 static inline void dp_initialize_arch_ops_be_ipa(struct dp_arch_ops *arch_ops)
3298 {
3299 }
3300 #endif /* IPA_OFFLOAD */
3301 
3302 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
3303 {
3304 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3305 	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
3306 	arch_ops->dp_rx_process = dp_rx_process_be;
3307 	arch_ops->dp_tx_send_fast = dp_tx_fast_send_be;
3308 	arch_ops->tx_comp_get_params_from_hal_desc =
3309 		dp_tx_comp_get_params_from_hal_desc_be;
3310 	arch_ops->dp_tx_process_htt_completion =
3311 				dp_tx_process_htt_completion_be;
3312 	arch_ops->dp_tx_desc_pool_alloc = dp_tx_desc_pool_alloc_be;
3313 	arch_ops->dp_tx_desc_pool_free = dp_tx_desc_pool_free_be;
3314 	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
3315 	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
3316 	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
3317 	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
3318 	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
3319 				dp_wbm_get_rx_desc_from_hal_desc_be;
3320 	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_be;
3321 	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_be;
3322 	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_be;
3323 	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_be;
3324 #endif
3325 	arch_ops->txrx_get_context_size = dp_get_context_size_be;
3326 #ifdef WIFI_MONITOR_SUPPORT
3327 	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_be;
3328 #endif
3329 	arch_ops->dp_rx_desc_cookie_2_va =
3330 			dp_rx_desc_cookie_2_va_be;
3331 	arch_ops->dp_rx_intrabss_mcast_handler =
3332 				dp_rx_intrabss_mcast_handler_be;
3333 	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_be;
3334 
3335 	arch_ops->txrx_soc_attach = dp_soc_attach_be;
3336 	arch_ops->txrx_soc_detach = dp_soc_detach_be;
3337 	arch_ops->txrx_soc_init = dp_soc_init_be;
3338 	arch_ops->txrx_soc_deinit = dp_soc_deinit_be_wrapper;
3339 	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_be;
3340 	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_be;
3341 	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_be;
3342 	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_be;
3343 	arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
3344 	arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
3345 	arch_ops->txrx_vdev_attach = dp_vdev_attach_be;
3346 	arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
3347 	arch_ops->txrx_peer_setup = dp_peer_setup_be;
3348 	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_be;
3349 	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_be;
3350 	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
3351 	arch_ops->dp_rx_peer_metadata_peer_id_get =
3352 					dp_rx_peer_metadata_peer_id_get_be;
3353 	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be;
3354 	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_be;
3355 	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_be;
3356 	dp_initialize_arch_ops_be_mlo(arch_ops);
3357 #ifdef WLAN_MLO_MULTI_CHIP
3358 	arch_ops->dp_get_soc_by_chip_id = dp_get_soc_by_chip_id_be;
3359 	arch_ops->dp_mlo_print_ptnr_info = dp_mlo_debug_print_ptnr_info;
3360 #endif
3361 	arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_be;
3362 	arch_ops->dp_peer_rx_reorder_queue_setup =
3363 					dp_peer_rx_reorder_queue_setup_be;
3364 	arch_ops->dp_rx_peer_set_link_id = dp_rx_set_link_id_be;
3365 	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_be;
3366 #if defined(DP_UMAC_HW_HARD_RESET) && defined(DP_UMAC_HW_RESET_SUPPORT)
3367 	arch_ops->dp_bank_reconfig = dp_bank_reconfig_be;
3368 	arch_ops->dp_reconfig_tx_vdev_mcast_ctrl =
3369 					dp_reconfig_tx_vdev_mcast_ctrl_be;
3370 	arch_ops->dp_cc_reg_cfg_init = dp_cc_reg_cfg_init;
3371 #endif
3372 
3373 #ifdef WLAN_SUPPORT_PPEDS
3374 	arch_ops->ppeds_handle_attached = dp_ppeds_handle_attached;
3375 	arch_ops->dp_txrx_ppeds_rings_status = dp_ppeds_rings_status;
3376 	arch_ops->txrx_soc_ppeds_start = dp_ppeds_start_soc_be;
3377 	arch_ops->txrx_soc_ppeds_stop = dp_ppeds_stop_soc_be;
3378 	arch_ops->dp_register_ppeds_interrupts = dp_register_ppeds_interrupts;
3379 	arch_ops->dp_free_ppeds_interrupts = dp_free_ppeds_interrupts;
3380 	arch_ops->dp_tx_ppeds_inuse_desc = dp_ppeds_inuse_desc;
3381 	arch_ops->dp_ppeds_clear_stats = dp_ppeds_clear_stats;
3382 	arch_ops->dp_txrx_ppeds_rings_stats = dp_ppeds_rings_stats;
3383 	arch_ops->dp_txrx_ppeds_clear_rings_stats = dp_ppeds_clear_rings_stats;
3384 	arch_ops->dp_tx_ppeds_cfg_astidx_cache_mapping =
3385 				dp_tx_ppeds_cfg_astidx_cache_mapping;
3386 #ifdef DP_UMAC_HW_RESET_SUPPORT
3387 	arch_ops->txrx_soc_ppeds_interrupt_stop = dp_ppeds_interrupt_stop_be;
3388 	arch_ops->txrx_soc_ppeds_interrupt_start = dp_ppeds_interrupt_start_be;
3389 	arch_ops->txrx_soc_ppeds_service_status_update =
3390 					dp_ppeds_service_status_update_be;
3391 	arch_ops->txrx_soc_ppeds_enabled_check = dp_ppeds_is_enabled_on_soc;
3392 	arch_ops->txrx_soc_ppeds_txdesc_pool_reset =
3393 					dp_ppeds_tx_desc_pool_reset;
3394 #endif
3395 #endif
3396 	dp_init_near_full_arch_ops_be(arch_ops);
3397 	arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_be;
3398 	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_be;
3399 	arch_ops->dp_set_rx_fst = dp_set_rx_fst_be;
3400 	arch_ops->dp_get_rx_fst = dp_get_rx_fst_be;
3401 	arch_ops->dp_rx_fst_deref = dp_rx_fst_release_ref_be;
3402 	arch_ops->dp_rx_fst_ref = dp_rx_fst_get_ref_be;
3403 	arch_ops->print_mlo_ast_stats = dp_print_mlo_ast_stats_be;
3404 	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_be;
3405 	arch_ops->reo_remap_config = dp_reo_remap_config_be;
3406 	arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_be;
3407 	arch_ops->txrx_srng_init = dp_srng_init_be;
3408 	arch_ops->dp_get_vdev_stats_for_unmap_peer =
3409 					dp_get_vdev_stats_for_unmap_peer_be;
3410 #ifdef WLAN_MLO_MULTI_CHIP
3411 	arch_ops->dp_get_interface_stats = dp_get_interface_stats_be;
3412 #endif
3413 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
3414 	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
3415 #endif
3416 	arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
3417 	dp_initialize_arch_ops_be_ipa(arch_ops);
3418 	dp_initialize_arch_ops_be_single_dev(arch_ops);
3419 	dp_initialize_arch_ops_be_fisa(arch_ops);
3420 }
3421 
3422 #ifdef QCA_SUPPORT_PRIMARY_LINK_MIGRATE
3423 static void
3424 dp_primary_link_migration(struct dp_soc *soc, void *cb_ctxt,
3425 			  union hal_reo_status *reo_status)
3426 {
3427 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3428 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
3429 	struct dp_soc *pr_soc = NULL;
3430 	struct dp_peer_info *pr_peer_info = (struct dp_peer_info *)cb_ctxt;
3431 	struct dp_peer *new_primary_peer = NULL;
3432 	struct dp_peer *mld_peer = NULL;
3433 	uint8_t primary_vdev_id;
3434 	struct cdp_txrx_peer_params_update params = {0};
3435 	uint8_t tid;
3436 	uint8_t is_wds = 0;
3437 	uint16_t hw_peer_id;
3438 	uint16_t ast_hash;
3439 
3440 	pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, pr_peer_info->chip_id);
3441 	if (!pr_soc) {
3442 		dp_htt_err("Invalid soc");
3443 		qdf_mem_free(pr_peer_info);
3444 		return;
3445 	}
3446 
3447 	new_primary_peer = pr_soc->peer_id_to_obj_map[
3448 				pr_peer_info->primary_peer_id];
3449 	if (!new_primary_peer) {
3450 		dp_htt_err("New primary peer is NULL");
3451 		qdf_mem_free(pr_peer_info);
3452 		return;
3453 	}
3454 
3455 	mld_peer = DP_GET_MLD_PEER_FROM_PEER(new_primary_peer);
3456 	if (!mld_peer) {
3457 		dp_htt_err("MLD peer is NULL");
3458 		qdf_mem_free(pr_peer_info);
3459 		return;
3460 	}
3461 
3462 	new_primary_peer->primary_link = 1;
3463 
3464 	hw_peer_id = pr_peer_info->hw_peer_id;
3465 	ast_hash = pr_peer_info->ast_hash;
3466 	/* Add AST entries for the new primary peer */
3467 	if (pr_soc->ast_offload_support && pr_soc->host_ast_db_enable) {
3468 		dp_peer_host_add_map_ast(pr_soc, mld_peer->peer_id, mld_peer->mac_addr.raw,
3469 					 hw_peer_id, new_primary_peer->vdev->vdev_id,
3470 					 ast_hash, is_wds);
3471 	}
3472 
3473 	/*
3474 	 * If reo_qref_table_en is set and the rx_tid qdesc for TID 0
3475 	 * is already set up, perform the qref write to the LUT for
3476 	 * all TIDs.
3477 	 */
3479 	if (hal_reo_shared_qaddr_is_enable(pr_soc->hal_soc) &&
3480 	    mld_peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
3481 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
3482 			hal_reo_shared_qaddr_write(pr_soc->hal_soc,
3483 						   mld_peer->peer_id,
3484 						   tid,
3485 						   mld_peer->rx_tid[tid].hw_qdesc_paddr);
3486 	}
3487 
3488 	if (pr_soc && pr_soc->cdp_soc.ol_ops->update_primary_link)
3489 		pr_soc->cdp_soc.ol_ops->update_primary_link(pr_soc->ctrl_psoc,
3490 						new_primary_peer->mac_addr.raw);
3491 
3492 	primary_vdev_id = new_primary_peer->vdev->vdev_id;
3493 
3494 	dp_vdev_unref_delete(soc, mld_peer->vdev, DP_MOD_ID_CHILD);
3495 	mld_peer->vdev = dp_vdev_get_ref_by_id(pr_soc, primary_vdev_id,
3496 			 DP_MOD_ID_CHILD);
3497 	mld_peer->txrx_peer->vdev = mld_peer->vdev;
3498 
3499 	params.vdev_id = new_primary_peer->vdev->vdev_id;
3500 	params.peer_mac = mld_peer->mac_addr.raw;
3501 	params.chip_id = pr_peer_info->chip_id;
3502 	params.pdev_id = new_primary_peer->vdev->pdev->pdev_id;
3503 
3504 	if (new_primary_peer->vdev->opmode == wlan_op_mode_sta) {
3505 		dp_wdi_event_handler(
3506 				WDI_EVENT_STA_PRIMARY_UMAC_UPDATE,
3507 				pr_soc, (void *)&params,
3508 				new_primary_peer->peer_id,
3509 				WDI_NO_VAL, params.pdev_id);
3510 	} else {
3511 		dp_wdi_event_handler(
3512 				WDI_EVENT_PEER_PRIMARY_UMAC_UPDATE,
3513 				pr_soc, (void *)&params,
3514 				new_primary_peer->peer_id,
3515 				WDI_NO_VAL, params.pdev_id);
3516 	}
3517 	qdf_mem_free(pr_peer_info);
3518 }
3519 
3520 #ifdef WLAN_SUPPORT_PPEDS
3521 static QDF_STATUS dp_get_ppe_info_for_vap(struct dp_soc *pr_soc,
3522 					  struct dp_peer *pr_peer,
3523 					  uint16_t *src_info)
3524 {
3525 	struct dp_soc_be *be_soc_mld = NULL;
3526 	struct cdp_ds_vp_params vp_params = {0};
3527 	struct dp_ppe_vp_profile *ppe_vp_profile;
3528 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3529 	struct cdp_soc_t *cdp_soc = &pr_soc->cdp_soc;
3530 
	/*
	 * The VP profile is fetched through a control-path callback;
	 * fail if it was never registered.
	 */
3534 	if (!cdp_soc->ol_ops->get_ppeds_profile_info_for_vap) {
3535 		dp_err("%pK: Register get ppeds profile info first", cdp_soc);
3536 		return QDF_STATUS_E_NULL_VALUE;
3537 	}
3538 
	/*
	 * Fetch the PPE VP profile info for the associated vap.
	 */
3542 	qdf_status = cdp_soc->ol_ops->get_ppeds_profile_info_for_vap(
3543 							pr_soc->ctrl_psoc,
3544 							pr_peer->vdev->vdev_id,
3545 							&vp_params);
3546 
3547 	if (QDF_IS_STATUS_ERROR(qdf_status)) {
3548 		dp_err("Could not find ppeds profile info");
3549 		return QDF_STATUS_E_NULL_VALUE;
3550 	}
3551 
3552 	/* Check if PPE DS routing is enabled on
3553 	 * the associated vap.
3554 	 */
3555 	if (vp_params.ppe_vp_type != PPE_VP_USER_TYPE_DS)
3556 		return qdf_status;
3557 
3558 	be_soc_mld = dp_get_be_soc_from_dp_soc(pr_soc);
3559 	ppe_vp_profile = &be_soc_mld->ppe_vp_profile[
3560 				vp_params.ppe_vp_profile_idx];
3561 	*src_info = ppe_vp_profile->vp_num;
3562 
3563 	return qdf_status;
3564 }
3565 #else
3566 static QDF_STATUS dp_get_ppe_info_for_vap(struct dp_soc *pr_soc,
3567 					  struct dp_peer *pr_peer,
3568 					  uint16_t *src_info)
3569 {
3570 	return QDF_STATUS_E_NOSUPPORT;
3571 }
3572 #endif
3573 
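/**
 * dp_htt_reo_migration() - Handle a PTQM primary link migration request
 *	for an MLD peer
 * @soc: dp_soc on which the HTT message arrived
 * @peer_id: peer id of the new primary link peer
 * @ml_peer_id: ML peer id of the MLD peer
 * @vdev_id: vdev id carried in the migration request
 * @pdev_id: pdev id carried in the migration request
 * @chip_id: chip id of the SOC hosting the new primary link peer
 *
 * Return: QDF_STATUS
 */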
3574 QDF_STATUS dp_htt_reo_migration(struct dp_soc *soc, uint16_t peer_id,
3575 				uint16_t ml_peer_id, uint16_t vdev_id,
3576 				uint8_t pdev_id, uint8_t chip_id)
3577 {
3578 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
3579 	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
3580 	uint16_t mld_peer_id = dp_gen_ml_peer_id(soc, ml_peer_id);
3581 	struct dp_soc *pr_soc = NULL;
3582 	struct dp_soc *current_pr_soc = NULL;
3583 	struct hal_reo_cmd_params params;
3584 	struct dp_rx_tid *rx_tid;
3585 	struct dp_peer *pr_peer = NULL;
3586 	struct dp_peer *mld_peer = NULL;
3587 	struct dp_soc *mld_soc = NULL;
3588 	struct dp_peer *current_pr_peer = NULL;
3589 	struct dp_peer_info *peer_info;
3590 	struct dp_vdev_be *be_vdev;
3591 	uint16_t src_info = 0;
3592 	QDF_STATUS status;
3593 	struct dp_ast_entry *ast_entry;
3594 	uint16_t hw_peer_id;
3595 	uint16_t ast_hash;
3596 
3597 	if (!dp_mlo) {
3598 		dp_htt_err("Invalid dp_mlo ctxt");
3599 		return QDF_STATUS_E_FAILURE;
3600 	}
3601 
3602 	pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, chip_id);
3603 	if (!pr_soc) {
3604 		dp_htt_err("Invalid soc");
3605 		return QDF_STATUS_E_FAILURE;
3606 	}
3607 
3608 	pr_peer = pr_soc->peer_id_to_obj_map[peer_id];
3609 	if (!pr_peer || !(IS_MLO_DP_LINK_PEER(pr_peer))) {
3610 		dp_htt_err("Invalid peer");
3611 		return QDF_STATUS_E_FAILURE;
3612 	}
3613 
3614 	mld_peer = DP_GET_MLD_PEER_FROM_PEER(pr_peer);
3615 
3616 	if (!mld_peer || (mld_peer->peer_id != mld_peer_id)) {
3617 		dp_htt_err("Invalid mld peer");
3618 		return QDF_STATUS_E_FAILURE;
3619 	}
3620 
3621 	be_vdev = dp_get_be_vdev_from_dp_vdev(pr_peer->vdev);
3622 	if (!be_vdev) {
3623 		dp_htt_err("Invalid be vdev");
3624 		return QDF_STATUS_E_FAILURE;
3625 	}
3626 
3627 	mld_soc = mld_peer->vdev->pdev->soc;
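	/*
	 * For a PPE-DS vap the VP number must be echoed back as
	 * src_info in the PTQM message; QDF_STATUS_E_NOSUPPORT (PPEDS
	 * compiled out) is not treated as a failure.
	 */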
3628 	status = dp_get_ppe_info_for_vap(pr_soc, pr_peer, &src_info);
3629 	if (status == QDF_STATUS_E_NULL_VALUE) {
3630 		dp_htt_err("Invalid ppe info for the vdev");
3631 		return QDF_STATUS_E_FAILURE;
3632 	}
3633 
3634 	current_pr_peer = dp_get_primary_link_peer_by_id(
3635 						pr_soc,
3636 						mld_peer->peer_id,
3637 						DP_MOD_ID_HTT);
	/* Mark the existing primary peer as non-primary */
3639 	if (current_pr_peer) {
3640 		current_pr_peer->primary_link = 0;
3641 		dp_peer_unref_delete(current_pr_peer, DP_MOD_ID_HTT);
3642 	}
3643 
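	/*
	 * Drop the MLD peer's qref LUT entries on the old primary SOC;
	 * the REO flush callback re-programs them on the new one.
	 */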
3644 	current_pr_soc = mld_peer->vdev->pdev->soc;
3645 	dp_peer_rx_reo_shared_qaddr_delete(current_pr_soc, mld_peer);
3646 
	/*
	 * Delete the MLD peer's AST entry on the current primary SOC;
	 * its ast_idx/ast_hash are carried over so the callback can
	 * re-add the mapping on the new primary SOC.
	 */
3648 	qdf_spin_lock_bh(&current_pr_soc->ast_lock);
3649 	ast_entry = dp_peer_ast_hash_find_soc(current_pr_soc, mld_peer->mac_addr.raw);
3650 	if (!ast_entry) {
3651 		dp_htt_err("Invalid ast entry");
3652 		qdf_spin_unlock_bh(&current_pr_soc->ast_lock);
3653 		return QDF_STATUS_E_FAILURE;
3654 	}
3655 
3656 	hw_peer_id = ast_entry->ast_idx;
3657 	ast_hash = ast_entry->ast_hash_value;
3658 	dp_peer_unlink_ast_entry(current_pr_soc, ast_entry, mld_peer);
3659 
3660 	if (ast_entry->is_mapped)
3661 		current_pr_soc->ast_table[ast_entry->ast_idx] = NULL;
3662 
3663 	dp_peer_free_ast_entry(current_pr_soc, ast_entry);
3664 
3665 	mld_peer->self_ast_entry = NULL;
3666 	qdf_spin_unlock_bh(&current_pr_soc->ast_lock);
3667 
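	/*
	 * Context for the REO flush callback; ownership passes to
	 * dp_primary_link_migration(), which frees it.
	 */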
3668 	peer_info = qdf_mem_malloc(sizeof(struct dp_peer_info));
3669 	if (!peer_info) {
3670 		dp_htt_err("Malloc failed");
3671 		return QDF_STATUS_E_FAILURE;
3672 	}
3673 
3674 	peer_info->primary_peer_id = peer_id;
3675 	peer_info->chip_id = chip_id;
3676 	peer_info->hw_peer_id = hw_peer_id;
3677 	peer_info->ast_hash = ast_hash;
3678 
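	/*
	 * Flush the REO cache for the rx_tid queue descriptor; the
	 * migration completes in dp_primary_link_migration() once the
	 * flush status arrives.
	 */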
3679 	qdf_mem_zero(&params, sizeof(params));
3680 
3681 	rx_tid = &mld_peer->rx_tid[0];
3682 	params.std.need_status = 1;
3683 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3684 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3685 	params.u.fl_cache_params.flush_no_inval = 0;
3686 	params.u.fl_cache_params.flush_entire_cache = 1;
3687 	status = dp_reo_send_cmd(current_pr_soc, CMD_FLUSH_CACHE, &params,
3688 				 dp_primary_link_migration,
3689 				 (void *)peer_info);
3690 
	if (status != QDF_STATUS_SUCCESS) {
		dp_htt_err("Reo flush failed");
		qdf_mem_free(peer_info);
		dp_h2t_ptqm_migration_msg_send(pr_soc, vdev_id, pdev_id,
					       chip_id, peer_id, ml_peer_id,
					       src_info, QDF_STATUS_E_FAILURE);
		return QDF_STATUS_E_FAILURE;
	}
3698 
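	/* Unblock the REO cache so it is usable again after the flush */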
3699 	qdf_mem_zero(&params, sizeof(params));
3700 	params.std.need_status = 0;
3701 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
3702 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
3703 	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
3704 	dp_reo_send_cmd(current_pr_soc, CMD_UNBLOCK_CACHE, &params, NULL, NULL);
3705 
3706 	dp_h2t_ptqm_migration_msg_send(pr_soc, vdev_id, pdev_id,
3707 				       chip_id, peer_id, ml_peer_id,
3708 				       src_info, QDF_STATUS_SUCCESS);
3709 	return QDF_STATUS_SUCCESS;
3710 }
3711 #endif
3712