xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_utility.h>
21 #include <dp_internal.h>
22 #include <dp_htt.h>
23 #include "dp_be.h"
24 #include "dp_be_tx.h"
25 #include "dp_be_rx.h"
26 #ifdef WIFI_MONITOR_SUPPORT
27 #if !defined(DISABLE_MON_CONFIG) && defined(QCA_MONITOR_2_0_SUPPORT)
28 #include "dp_mon_2.0.h"
29 #endif
30 #include "dp_mon.h"
31 #endif
32 #include <hal_be_api.h>
33 #ifdef WLAN_SUPPORT_PPEDS
34 #include "be/dp_ppeds.h"
35 #endif
36 
37 /* Generic AST entry aging timer value */
38 #define DP_AST_AGING_TIMER_DEFAULT_MS	5000
39 
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* Single-pdev (e.g. mobile) build: TX vdev_id check is disabled in HW */
#define DP_TX_VDEV_ID_CHECK_ENABLE 0

/*
 * Per-TCL-data-ring mapping to its WBM completion ring and return
 * buffer manager (RBM) id.  Entry order is indexed by tcl_ring_num.
 * NOTE(review): the ring numbers differ between KIWI V2 and older
 * revisions (V2 uses wbm rings 5/6, older parts 6/7 backed by the
 * SW5/SW6 BM ids) — presumably chip-specific numbering; confirm
 * against the HAL headers for each target.
 */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 4, HAL_BE_WBM_SW4_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
#ifdef QCA_WIFI_KIWI_V2
	{3, 5, HAL_BE_WBM_SW5_BM_ID, 0},
	{4, 6, HAL_BE_WBM_SW6_BM_ID, 0}
#else
	{3, 6, HAL_BE_WBM_SW5_BM_ID, 0},
	{4, 7, HAL_BE_WBM_SW6_BM_ID, 0}
#endif
};
#else
/* Multi-pdev build: enable per-vdev id check on the TX path */
#define DP_TX_VDEV_ID_CHECK_ENABLE 1

/* Identity mapping: TCL ring N completes on WBM ring N / SW N BM id */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_BE_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_BE_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_BE_WBM_SW2_BM_ID, 0},
	{3, 3, HAL_BE_WBM_SW3_BM_ID, 0},
	{4, 4, HAL_BE_WBM_SW4_BM_ID, 0}
};
#endif
66 
67 #ifdef WLAN_SUPPORT_PPEDS
/* CDP PPE (packet processing engine) ops table registered with the
 * converged interface when PPE-DS attach succeeds (see
 * dp_soc_ppe_attach_be()).
 */
static struct cdp_ppe_txrx_ops dp_ops_ppe_be = {
	.ppeds_entry_attach = dp_ppeds_attach_vdev_be,
	.ppeds_entry_detach = dp_ppeds_detach_vdev_be,
	.ppeds_set_int_pri2tid = dp_ppeds_set_int_pri2tid_be,
	.ppeds_update_int_pri2tid = dp_ppeds_update_int_pri2tid_be,
	.ppeds_entry_dump = dp_ppeds_dump_ppe_vp_tbl_be,
	.ppeds_enable_pri2tid = dp_ppeds_vdev_enable_pri2tid_be,
};
76 
77 static void dp_ppeds_rings_status(struct dp_soc *soc)
78 {
79 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
80 
81 	dp_print_ring_stat_from_hal(soc, &be_soc->reo2ppe_ring, REO2PPE);
82 	dp_print_ring_stat_from_hal(soc, &be_soc->ppe2tcl_ring, PPE2TCL);
83 }
84 #endif
85 
86 static void dp_soc_cfg_attach_be(struct dp_soc *soc)
87 {
88 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
89 
90 	wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
91 
92 	soc->wlan_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
93 
94 	/* this is used only when dmac mode is enabled */
95 	soc->num_rx_refill_buf_rings = 1;
96 
97 	soc->wlan_cfg_ctx->notify_frame_support =
98 				DP_MARK_NOTIFY_FRAME_SUPPORT;
99 }
100 
101 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
102 {
103 	switch (context_type) {
104 	case DP_CONTEXT_TYPE_SOC:
105 		return sizeof(struct dp_soc_be);
106 	case DP_CONTEXT_TYPE_PDEV:
107 		return sizeof(struct dp_pdev_be);
108 	case DP_CONTEXT_TYPE_VDEV:
109 		return sizeof(struct dp_vdev_be);
110 	case DP_CONTEXT_TYPE_PEER:
111 		return sizeof(struct dp_peer_be);
112 	default:
113 		return 0;
114 	}
115 }
116 
117 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
118 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/**
 * dp_cc_wbm_sw_en_cfg() - configure HW cookie conversion enablement
 *			   per wbm2sw ring
 * @cc_cfg: HAL HW cookie conversion configuration structure pointer
 *
 * Return: None
 */
126 static inline
127 void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
128 {
129 	cc_cfg->wbm2sw6_cc_en = 1;
130 	cc_cfg->wbm2sw5_cc_en = 1;
131 	cc_cfg->wbm2sw4_cc_en = 1;
132 	cc_cfg->wbm2sw3_cc_en = 1;
133 	cc_cfg->wbm2sw2_cc_en = 1;
134 	/* disable wbm2sw1 hw cc as it's for FW */
135 	cc_cfg->wbm2sw1_cc_en = 0;
136 	cc_cfg->wbm2sw0_cc_en = 1;
137 	cc_cfg->wbm2fw_cc_en = 0;
138 }
139 #else
140 static inline
141 void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
142 {
143 	cc_cfg->wbm2sw6_cc_en = 1;
144 	cc_cfg->wbm2sw5_cc_en = 1;
145 	cc_cfg->wbm2sw4_cc_en = 1;
146 	cc_cfg->wbm2sw3_cc_en = 1;
147 	cc_cfg->wbm2sw2_cc_en = 1;
148 	cc_cfg->wbm2sw1_cc_en = 1;
149 	cc_cfg->wbm2sw0_cc_en = 1;
150 	cc_cfg->wbm2fw_cc_en = 0;
151 }
152 #endif
153 
154 #if defined(WLAN_SUPPORT_RX_FISA)
/* dp_fisa_fst_cmem_addr_init() - carve out a DP_CMEM_FST_SIZE region of
 * CMEM for the FISA flow search table (FST).  Updates soc->fst_cmem_base/
 * fst_cmem_size and shrinks soc->cmem_avail_size accordingly.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM if not enough CMEM
 *	   remains.
 */
static QDF_STATUS dp_fisa_fst_cmem_addr_init(struct dp_soc *soc)
{
	dp_info("cmem base 0x%llx, total size 0x%llx avail_size 0x%llx",
		soc->cmem_base, soc->cmem_total_size, soc->cmem_avail_size);
	/* get CMEM for cookie conversion */
	if (soc->cmem_avail_size < DP_CMEM_FST_SIZE) {
		dp_err("cmem_size 0x%llx bytes < 16K", soc->cmem_avail_size);
		return QDF_STATUS_E_NOMEM;
	}

	soc->fst_cmem_size = DP_CMEM_FST_SIZE;

	/* FST starts right after whatever has already been carved out
	 * (total - avail == bytes consumed so far)
	 */
	soc->fst_cmem_base = soc->cmem_base +
			     (soc->cmem_total_size - soc->cmem_avail_size);
	soc->cmem_avail_size -= soc->fst_cmem_size;

	dp_info("fst_cmem_base 0x%llx, fst_cmem_size 0x%llx",
		soc->fst_cmem_base, soc->fst_cmem_size);

	return QDF_STATUS_SUCCESS;
}
176 #else /* !WLAN_SUPPORT_RX_FISA */
/* No-op stub when RX FISA is compiled out: no CMEM is reserved */
static QDF_STATUS dp_fisa_fst_cmem_addr_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
181 #endif
182 
/**
 * dp_cc_reg_cfg_init() - initialize and configure HW cookie
 *			  conversion register
 * @soc: SOC handle
 * @is_4k_align: page address 4k aligned
 *
 * Return: None
 */
191 static void dp_cc_reg_cfg_init(struct dp_soc *soc,
192 			       bool is_4k_align)
193 {
194 	struct hal_hw_cc_config cc_cfg = { 0 };
195 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
196 
197 	if (soc->cdp_soc.ol_ops->get_con_mode &&
198 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
199 		return;
200 
201 	if (!soc->wlan_cfg_ctx->hw_cc_enabled) {
202 		dp_info("INI skip HW CC register setting");
203 		return;
204 	}
205 
206 	cc_cfg.lut_base_addr_31_0 = be_soc->cc_cmem_base;
207 	cc_cfg.cc_global_en = true;
208 	cc_cfg.page_4k_align = is_4k_align;
209 	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
210 	cc_cfg.cookie_page_msb = DP_CC_DESC_ID_PPT_PAGE_OS_MSB;
211 	/* 36th bit should be 1 then HW know this is CMEM address */
212 	cc_cfg.lut_base_addr_39_32 = 0x10;
213 
214 	cc_cfg.error_path_cookie_conv_en = true;
215 	cc_cfg.release_path_cookie_conv_en = true;
216 	dp_cc_wbm_sw_en_cfg(&cc_cfg);
217 
218 	hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
219 }
220 
221 /**
222  * dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
223  * @hal_soc_hdl: HAL SOC handle
224  * @offset: CMEM address
225  * @value: value to write
226  *
227  * Return: None.
228  */
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{
	/* thin wrapper so the no-CC build can stub CMEM writes out */
	hal_cmem_write(hal_soc_hdl, offset, value);
}
235 
/**
 * dp_hw_cc_cmem_addr_init() - Check and initialize CMEM base address for
 *			       HW cookie conversion
 * @soc: SOC handle
 *
 * Return: 0 in case of success, else error value
 */
244 static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
245 {
246 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
247 
248 	dp_info("cmem base 0x%llx, total size 0x%llx avail_size 0x%llx",
249 		soc->cmem_base, soc->cmem_total_size, soc->cmem_avail_size);
250 	/* get CMEM for cookie conversion */
251 	if (soc->cmem_avail_size < DP_CC_PPT_MEM_SIZE) {
252 		dp_err("cmem_size 0x%llx bytes < 4K", soc->cmem_avail_size);
253 		return QDF_STATUS_E_RESOURCES;
254 	}
255 	be_soc->cc_cmem_base = (uint32_t)(soc->cmem_base +
256 					  DP_CC_MEM_OFFSET_IN_CMEM);
257 
258 	soc->cmem_avail_size -= DP_CC_PPT_MEM_SIZE;
259 
260 	dp_info("cc_cmem_base 0x%x, cmem_avail_size 0x%llx",
261 		be_soc->cc_cmem_base, soc->cmem_avail_size);
262 	return QDF_STATUS_SUCCESS;
263 }
264 
265 static QDF_STATUS dp_get_cmem_allocation(struct dp_soc *soc,
266 					 uint8_t for_feature)
267 {
268 	QDF_STATUS status = QDF_STATUS_E_NOMEM;
269 
270 	switch (for_feature) {
271 	case COOKIE_CONVERSION:
272 		status = dp_hw_cc_cmem_addr_init(soc);
273 		break;
274 	case FISA_FST:
275 		status = dp_fisa_fst_cmem_addr_init(soc);
276 		break;
277 	default:
278 		dp_err("Invalid CMEM request");
279 	}
280 
281 	return status;
282 }
283 
284 #else
285 
/* Stubs used when DP_FEATURE_HW_COOKIE_CONVERSION is compiled out:
 * no registers are programmed, no CMEM is carved out, and all
 * allocation requests succeed trivially.
 */
static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
				      bool is_4k_align) {}

static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
				       uint32_t offset,
				       uint32_t value)
{ }

static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_get_cmem_allocation(struct dp_soc *soc,
					 uint8_t for_feature)
{
	return QDF_STATUS_SUCCESS;
}
304 
305 #endif
306 
307 QDF_STATUS
308 dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
309 			       struct dp_hw_cookie_conversion_t *cc_ctx,
310 			       uint32_t num_descs,
311 			       enum dp_desc_type desc_type,
312 			       uint8_t desc_pool_id)
313 {
314 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
315 	uint32_t num_spt_pages, i = 0;
316 	struct dp_spt_page_desc *spt_desc;
317 	struct qdf_mem_dma_page_t *dma_page;
318 	uint8_t chip_id;
319 
320 	/* estimate how many SPT DDR pages needed */
321 	num_spt_pages = num_descs / DP_CC_SPT_PAGE_MAX_ENTRIES;
322 	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
323 					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
324 	dp_info("num_spt_pages needed %d", num_spt_pages);
325 
326 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_CC_SPT_PAGE_TYPE,
327 				      &cc_ctx->page_pool, qdf_page_size,
328 				      num_spt_pages, 0, false);
329 	if (!cc_ctx->page_pool.dma_pages) {
330 		dp_err("spt ddr pages allocation failed");
331 		return QDF_STATUS_E_RESOURCES;
332 	}
333 	cc_ctx->page_desc_base = qdf_mem_malloc(
334 			num_spt_pages * sizeof(struct dp_spt_page_desc));
335 	if (!cc_ctx->page_desc_base) {
336 		dp_err("spt page descs allocation failed");
337 		goto fail_0;
338 	}
339 
340 	chip_id = dp_mlo_get_chip_id(soc);
341 	cc_ctx->cmem_offset = dp_desc_pool_get_cmem_base(chip_id, desc_pool_id,
342 							 desc_type);
343 
344 	/* initial page desc */
345 	spt_desc = cc_ctx->page_desc_base;
346 	dma_page = cc_ctx->page_pool.dma_pages;
347 	while (i < num_spt_pages) {
348 		/* check if page address 4K aligned */
349 		if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
350 			dp_err("non-4k aligned pages addr %pK",
351 			       (void *)dma_page[i].page_p_addr);
352 			goto fail_1;
353 		}
354 
355 		spt_desc[i].page_v_addr =
356 					dma_page[i].page_v_addr_start;
357 		spt_desc[i].page_p_addr =
358 					dma_page[i].page_p_addr;
359 		i++;
360 	}
361 
362 	cc_ctx->total_page_num = num_spt_pages;
363 	qdf_spinlock_create(&cc_ctx->cc_lock);
364 
365 	return QDF_STATUS_SUCCESS;
366 fail_1:
367 	qdf_mem_free(cc_ctx->page_desc_base);
368 fail_0:
369 	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
370 				     &cc_ctx->page_pool, 0, false);
371 
372 	return QDF_STATUS_E_FAILURE;
373 }
374 
375 QDF_STATUS
376 dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
377 			       struct dp_hw_cookie_conversion_t *cc_ctx)
378 {
379 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
380 
381 	qdf_mem_free(cc_ctx->page_desc_base);
382 	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
383 				     &cc_ctx->page_pool, 0, false);
384 	qdf_spinlock_destroy(&cc_ctx->cc_lock);
385 
386 	return QDF_STATUS_SUCCESS;
387 }
388 
/* dp_hw_cookie_conversion_init() - program the physical address of every
 * SPT page of this CC context into the PPT residing in CMEM, and record
 * the PPT index used by each page.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the context was
 *	   never attached (no pages).
 */
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx)
{
	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
	uint32_t i = 0;
	struct dp_spt_page_desc *spt_desc;
	uint32_t ppt_index;
	uint32_t ppt_id_start;

	if (!cc_ctx->total_page_num) {
		dp_err("total page num is 0");
		return QDF_STATUS_E_INVAL;
	}

	/* first PPT slot for this context, derived from its CMEM offset */
	ppt_id_start = DP_CMEM_OFFSET_TO_PPT_ID(cc_ctx->cmem_offset);
	spt_desc = cc_ctx->page_desc_base;
	while (i < cc_ctx->total_page_num) {
		/* write page PA to CMEM; HW appends the low (4K-aligned)
		 * bits itself, so only the upper bits are stored
		 */
		dp_hw_cc_cmem_write(soc->hal_soc,
				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
				    (spt_desc[i].page_p_addr >>
				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));

		ppt_index = ppt_id_start + i;

		/* NOTE(review): the bounds check runs after the CMEM write
		 * above; attach already caps pages at DP_CC_PPT_MAX_ENTRIES,
		 * so this should be unreachable — assert as a hard invariant
		 */
		if (ppt_index >= DP_CC_PPT_MAX_ENTRIES)
			qdf_assert_always(0);

		spt_desc[i].ppt_index = ppt_index;

		/* soc-wide PPT index -> page VA lookup used on completion */
		be_soc->page_desc_base[ppt_index].page_v_addr =
				spt_desc[i].page_v_addr;
		i++;
	}
	return QDF_STATUS_SUCCESS;
}
427 
428 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
429 QDF_STATUS
430 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
431 			       struct dp_hw_cookie_conversion_t *cc_ctx)
432 {
433 	uint32_t ppt_index;
434 	struct dp_spt_page_desc *spt_desc;
435 	int i = 0;
436 
437 	spt_desc = cc_ctx->page_desc_base;
438 	while (i < cc_ctx->total_page_num) {
439 		ppt_index = spt_desc[i].ppt_index;
440 		be_soc->page_desc_base[ppt_index].page_v_addr = NULL;
441 		i++;
442 	}
443 	return QDF_STATUS_SUCCESS;
444 }
445 #else
446 QDF_STATUS
447 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
448 			       struct dp_hw_cookie_conversion_t *cc_ctx)
449 {
450 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
451 	uint32_t ppt_index;
452 	struct dp_spt_page_desc *spt_desc;
453 	int i = 0;
454 
455 	spt_desc = cc_ctx->page_desc_base;
456 	while (i < cc_ctx->total_page_num) {
457 		/* reset PA in CMEM to NULL */
458 		dp_hw_cc_cmem_write(soc->hal_soc,
459 				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
460 				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
461 				    0);
462 
463 		ppt_index = spt_desc[i].ppt_index;
464 		be_soc->page_desc_base[ppt_index].page_v_addr = NULL;
465 		i++;
466 	}
467 	return QDF_STATUS_SUCCESS;
468 }
469 #endif
470 
471 #ifdef WLAN_SUPPORT_PPEDS
/* dp_soc_ppe_attach_be() - attach PPE-DS to this soc and, on success,
 * register the PPE CDP ops table.
 *
 * Return: always QDF_STATUS_SUCCESS (PPE-DS is optional; see below).
 */
static QDF_STATUS dp_soc_ppe_attach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_ops *cdp_ops = soc->cdp_soc.ops;

	/*
	 * Check if PPE DS is enabled.
	 */
	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* NOTE(review): attach failure is deliberately non-fatal — the
	 * soc comes up without PPE-DS and ppe_ops stays unregistered
	 */
	if (dp_ppeds_attach_soc_be(be_soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_SUCCESS;

	cdp_ops->ppe_ops = &dp_ops_ppe_be;

	return QDF_STATUS_SUCCESS;
}
490 
/* dp_soc_ppe_detach_be() - detach PPE-DS and deregister the PPE CDP ops.
 *
 * Return: QDF_STATUS_E_FAILURE when PPE-DS is not enabled in cfg,
 *	   QDF_STATUS_SUCCESS otherwise.
 */
static QDF_STATUS dp_soc_ppe_detach_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_ops *cdp_ops = soc->cdp_soc.ops;

	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_E_FAILURE;

	dp_ppeds_detach_soc_be(be_soc);

	/* ops table is deregistered unconditionally after detach */
	cdp_ops->ppe_ops = NULL;

	return QDF_STATUS_SUCCESS;
}
505 #else
/* No-op stubs for builds without WLAN_SUPPORT_PPEDS: all PPE-DS
 * lifecycle hooks succeed trivially.
 */
static QDF_STATUS dp_ppeds_init_soc_be(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_ppeds_deinit_soc_be(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_soc_ppe_attach_be(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_soc_ppe_detach_be(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
525 #endif /* WLAN_SUPPORT_PPEDS */
526 
527 static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
528 {
529 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
530 	int i = 0;
531 
532 	dp_soc_ppe_detach_be(soc);
533 
534 	for (i = 0; i < MAX_TXDESC_POOLS; i++)
535 		dp_hw_cookie_conversion_detach(be_soc,
536 					       &be_soc->tx_cc_ctx[i]);
537 
538 	for (i = 0; i < MAX_RXDESC_POOLS; i++)
539 		dp_hw_cookie_conversion_detach(be_soc,
540 					       &be_soc->rx_cc_ctx[i]);
541 
542 	qdf_mem_free(be_soc->page_desc_base);
543 	be_soc->page_desc_base = NULL;
544 
545 	return QDF_STATUS_SUCCESS;
546 }
547 
548 #ifdef WLAN_MLO_MULTI_CHIP
549 #ifdef WLAN_MCAST_MLO
550 static inline void
551 dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
552 {
553 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
554 
555 	be_vdev->mcast_primary = false;
556 	be_vdev->seq_num = 0;
557 	dp_tx_mcast_mlo_reinject_routing_set(soc,
558 					     (void *)&be_vdev->mcast_primary);
559 	if (vdev->opmode == wlan_op_mode_ap) {
560 		if (vdev->mlo_vdev)
561 			hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
562 						   vdev->vdev_id,
563 						   HAL_TX_MCAST_CTRL_DROP);
564 		else
565 			hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
566 						   vdev->vdev_id,
567 						   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
568 	}
569 }
570 
571 static inline void
572 dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
573 {
574 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
575 
576 	be_vdev->seq_num = 0;
577 	be_vdev->mcast_primary = false;
578 	vdev->mlo_vdev = false;
579 }
580 #else
/* No-op stubs for builds without WLAN_MCAST_MLO */
static inline void
dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
{
}

static inline void
dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
{
}
590 #endif
/* dp_mlo_init_ptnr_list() - mark every slot of the per-vdev MLO partner
 * vdev table (chips x links) as unused.
 */
static void dp_mlo_init_ptnr_list(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	qdf_mem_set(be_vdev->partner_vdev_list,
		    WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC,
		    CDP_INVALID_VDEV_ID);
}
599 
/* dp_get_rx_hash_key_be() - MLO build: the RX toeplitz hash key is
 * shared across partner chips, so fetch it via the MLO context.
 */
static void dp_get_rx_hash_key_be(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_mlo_get_rx_hash_key(soc, lro_hash);
}
605 #else
/* Non-MLO build: mcast and partner-list hooks are no-ops, and the RX
 * hash key is generated locally.
 */
static inline void
dp_mlo_mcast_init(struct dp_soc *soc, struct dp_vdev *vdev)
{
}

static inline void
dp_mlo_mcast_deinit(struct dp_soc *soc, struct dp_vdev *vdev)
{
}

static void dp_mlo_init_ptnr_list(struct dp_vdev *vdev)
{
}

static void dp_get_rx_hash_key_be(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}
625 #endif
626 
627 static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc,
628 				   struct cdp_soc_attach_params *params)
629 {
630 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
631 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
632 	uint32_t max_tx_rx_desc_num, num_spt_pages;
633 	uint32_t num_entries;
634 	int i = 0;
635 
636 	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
637 		WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS +
638 		WLAN_CFG_NUM_PPEDS_TX_DESC_MAX * MAX_PPE_TXDESC_POOLS;
639 	/* estimate how many SPT DDR pages needed */
640 	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
641 	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
642 					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
643 
644 	be_soc->page_desc_base = qdf_mem_malloc(
645 		DP_CC_PPT_MAX_ENTRIES * sizeof(struct dp_spt_page_desc));
646 	if (!be_soc->page_desc_base) {
647 		dp_err("spt page descs allocation failed");
648 		return QDF_STATUS_E_NOMEM;
649 	}
650 
651 	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
652 
653 	qdf_status = dp_get_cmem_allocation(soc, COOKIE_CONVERSION);
654 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
655 		goto fail;
656 
657 	dp_soc_mlo_fill_params(soc, params);
658 
659 	qdf_status = dp_soc_ppe_attach_be(soc);
660 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
661 		goto fail;
662 
663 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
664 		num_entries = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
665 		qdf_status =
666 			dp_hw_cookie_conversion_attach(be_soc,
667 						       &be_soc->tx_cc_ctx[i],
668 						       num_entries,
669 						       DP_TX_DESC_TYPE, i);
670 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
671 			goto fail;
672 	}
673 
674 	qdf_status = dp_get_cmem_allocation(soc, FISA_FST);
675 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
676 		goto fail;
677 
678 	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
679 		num_entries =
680 			wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
681 		qdf_status =
682 			dp_hw_cookie_conversion_attach(be_soc,
683 						       &be_soc->rx_cc_ctx[i],
684 						       num_entries,
685 						       DP_RX_DESC_BUF_TYPE, i);
686 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
687 			goto fail;
688 	}
689 
690 	return qdf_status;
691 fail:
692 	dp_soc_detach_be(soc);
693 	return qdf_status;
694 }
695 
696 static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
697 {
698 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
699 	int i = 0;
700 
701 	dp_tx_deinit_bank_profiles(be_soc);
702 	for (i = 0; i < MAX_TXDESC_POOLS; i++)
703 		dp_hw_cookie_conversion_deinit(be_soc,
704 					       &be_soc->tx_cc_ctx[i]);
705 
706 	for (i = 0; i < MAX_RXDESC_POOLS; i++)
707 		dp_hw_cookie_conversion_deinit(be_soc,
708 					       &be_soc->rx_cc_ctx[i]);
709 
710 	dp_ppeds_deinit_soc_be(soc);
711 
712 	return QDF_STATUS_SUCCESS;
713 }
714 
/* dp_soc_init_be() - BE-specific soc init: PPE-DS init, program all
 * cookie conversion PPT entries into CMEM, set up vdev-id-mismatch
 * routing, TX bank profiles, and finally the CC registers.  Unwinds via
 * dp_soc_deinit_be() on any failure.
 */
static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int i = 0;

	dp_ppeds_init_soc_be(soc);

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		qdf_status =
			dp_hw_cookie_conversion_init(be_soc,
						     &be_soc->tx_cc_ctx[i]);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
			goto fail;
	}

	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
		qdf_status =
			dp_hw_cookie_conversion_init(be_soc,
						     &be_soc->rx_cc_ctx[i]);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
			goto fail;
	}

	/* route vdev_id mismatch notification via FW completion */
	hal_tx_vdev_mismatch_routing_set(soc->hal_soc,
					 HAL_TX_VDEV_MISMATCH_FW_NOTIFY);

	qdf_status = dp_tx_init_bank_profiles(be_soc);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		goto fail;

	/* write WBM/REO cookie conversion CFG register */
	dp_cc_reg_cfg_init(soc, true);

	return qdf_status;
fail:
	dp_soc_deinit_be(soc);
	return qdf_status;
}
755 
/* dp_pdev_attach_be() - BE pdev attach: fill MLO params and register the
 * pdev in the MLO link -> pdev map.
 */
static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	dp_pdev_mlo_fill_params(pdev, params);
	dp_mlo_update_link_to_pdev_map(pdev->soc, pdev);

	return QDF_STATUS_SUCCESS;
}
764 
/* dp_pdev_detach_be() - BE pdev detach: remove the pdev from the MLO
 * link -> pdev map.
 */
static QDF_STATUS dp_pdev_detach_be(struct dp_pdev *pdev)
{
	dp_mlo_update_link_to_pdev_unmap(pdev->soc, pdev);

	return QDF_STATUS_SUCCESS;
}
771 
772 static QDF_STATUS dp_vdev_attach_be(struct dp_soc *soc, struct dp_vdev *vdev)
773 {
774 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
775 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
776 	struct dp_pdev *pdev = vdev->pdev;
777 
778 	if (vdev->opmode == wlan_op_mode_monitor)
779 		return QDF_STATUS_SUCCESS;
780 
781 	be_vdev->vdev_id_check_en = DP_TX_VDEV_ID_CHECK_ENABLE;
782 
783 	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
784 	vdev->bank_id = be_vdev->bank_id;
785 
786 	if (be_vdev->bank_id == DP_BE_INVALID_BANK_ID) {
787 		QDF_BUG(0);
788 		return QDF_STATUS_E_FAULT;
789 	}
790 
791 	if (vdev->opmode == wlan_op_mode_sta) {
792 		if (soc->cdp_soc.ol_ops->set_mec_timer)
793 			soc->cdp_soc.ol_ops->set_mec_timer(
794 					soc->ctrl_psoc,
795 					vdev->vdev_id,
796 					DP_AST_AGING_TIMER_DEFAULT_MS);
797 
798 		if (pdev->isolation)
799 			hal_tx_vdev_mcast_ctrl_set(soc->hal_soc, vdev->vdev_id,
800 						   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
801 		else
802 			hal_tx_vdev_mcast_ctrl_set(soc->hal_soc, vdev->vdev_id,
803 						   HAL_TX_MCAST_CTRL_MEC_NOTIFY);
804 	}
805 
806 	dp_mlo_mcast_init(soc, vdev);
807 	dp_mlo_init_ptnr_list(vdev);
808 
809 	return QDF_STATUS_SUCCESS;
810 }
811 
/* dp_vdev_detach_be() - BE vdev detach: tear down MLO mcast state (AP
 * only), return the TX bank profile and clear the MLO partner list.
 */
static QDF_STATUS dp_vdev_detach_be(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	/* monitor vdevs acquired nothing in attach */
	if (vdev->opmode == wlan_op_mode_monitor)
		return QDF_STATUS_SUCCESS;

	if (vdev->opmode == wlan_op_mode_ap)
		dp_mlo_mcast_deinit(soc, vdev);

	dp_tx_put_bank_profile(be_soc, be_vdev);
	dp_clr_mlo_ptnr_list(soc, vdev);

	return QDF_STATUS_SUCCESS;
}
828 
/* dp_get_soc_context_size_be() - size of the BE soc private structure */
qdf_size_t dp_get_soc_context_size_be(void)
{
	return sizeof(struct dp_soc_be);
}
833 
834 #ifdef CONFIG_WORD_BASED_TLV
835 /**
836  * dp_rxdma_ring_wmask_cfg_be() - Setup RXDMA ring word mask config
837  * @soc: Common DP soc handle
838  * @htt_tlv_filter: Rx SRNG TLV and filter setting
839  *
840  * Return: none
841  */
842 static inline void
843 dp_rxdma_ring_wmask_cfg_be(struct dp_soc *soc,
844 			   struct htt_rx_ring_tlv_filter *htt_tlv_filter)
845 {
846 	htt_tlv_filter->rx_msdu_end_wmask =
847 				 hal_rx_msdu_end_wmask_get(soc->hal_soc);
848 	htt_tlv_filter->rx_mpdu_start_wmask =
849 				 hal_rx_mpdu_start_wmask_get(soc->hal_soc);
850 }
851 #else
/* No-op stub when word-based TLVs are not compiled in */
static inline void
dp_rxdma_ring_wmask_cfg_be(struct dp_soc *soc,
			   struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
}
857 #endif
858 
859 #ifdef NO_RX_PKT_HDR_TLV
860 /**
861  * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
862  * @soc: Common DP soc handle
863  *
864  * Return: QDF_STATUS
865  */
866 static QDF_STATUS
867 dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
868 {
869 	int i;
870 	int mac_id;
871 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
872 	struct dp_srng *rx_mac_srng;
873 	QDF_STATUS status = QDF_STATUS_SUCCESS;
874 
875 	/*
876 	 * In Beryllium chipset msdu_start, mpdu_end
877 	 * and rx_attn are part of msdu_end/mpdu_start
878 	 */
879 	htt_tlv_filter.msdu_start = 0;
880 	htt_tlv_filter.mpdu_end = 0;
881 	htt_tlv_filter.attention = 0;
882 	htt_tlv_filter.mpdu_start = 1;
883 	htt_tlv_filter.msdu_end = 1;
884 	htt_tlv_filter.packet = 1;
885 	htt_tlv_filter.packet_header = 0;
886 
887 	htt_tlv_filter.ppdu_start = 0;
888 	htt_tlv_filter.ppdu_end = 0;
889 	htt_tlv_filter.ppdu_end_user_stats = 0;
890 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
891 	htt_tlv_filter.ppdu_end_status_done = 0;
892 	htt_tlv_filter.enable_fp = 1;
893 	htt_tlv_filter.enable_md = 0;
894 	htt_tlv_filter.enable_md = 0;
895 	htt_tlv_filter.enable_mo = 0;
896 
897 	htt_tlv_filter.fp_mgmt_filter = 0;
898 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
899 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
900 					 FILTER_DATA_MCAST |
901 					 FILTER_DATA_DATA);
902 	htt_tlv_filter.mo_mgmt_filter = 0;
903 	htt_tlv_filter.mo_ctrl_filter = 0;
904 	htt_tlv_filter.mo_data_filter = 0;
905 	htt_tlv_filter.md_data_filter = 0;
906 
907 	htt_tlv_filter.offset_valid = true;
908 
909 	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
910 	htt_tlv_filter.rx_mpdu_end_offset = 0;
911 	htt_tlv_filter.rx_msdu_start_offset = 0;
912 	htt_tlv_filter.rx_attn_offset = 0;
913 
914 	/*
915 	 * For monitor mode, the packet hdr tlv is enabled later during
916 	 * filter update
917 	 */
918 	if (soc->cdp_soc.ol_ops->get_con_mode &&
919 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)
920 		htt_tlv_filter.rx_packet_offset = soc->rx_mon_pkt_tlv_size;
921 	else
922 		htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
923 
924 	/*Not subscribing rx_pkt_header*/
925 	htt_tlv_filter.rx_header_offset = 0;
926 	htt_tlv_filter.rx_mpdu_start_offset =
927 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
928 	htt_tlv_filter.rx_msdu_end_offset =
929 				hal_rx_msdu_end_offset_get(soc->hal_soc);
930 
931 	dp_rxdma_ring_wmask_cfg_be(soc, &htt_tlv_filter);
932 
933 	for (i = 0; i < MAX_PDEV_CNT; i++) {
934 		struct dp_pdev *pdev = soc->pdev_list[i];
935 
936 		if (!pdev)
937 			continue;
938 
939 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
940 			int mac_for_pdev =
941 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
942 			/*
943 			 * Obtain lmac id from pdev to access the LMAC ring
944 			 * in soc context
945 			 */
946 			int lmac_id =
947 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
948 							   pdev->pdev_id);
949 
950 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
951 
952 			if (!rx_mac_srng->hal_srng)
953 				continue;
954 
955 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
956 					    rx_mac_srng->hal_srng,
957 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
958 					    &htt_tlv_filter);
959 		}
960 	}
961 	return status;
962 }
963 #else
964 /**
965  * dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
966  * @soc: Common DP soc handle
967  *
968  * Return: QDF_STATUS
969  */
970 static QDF_STATUS
971 dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
972 {
973 	int i;
974 	int mac_id;
975 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
976 	struct dp_srng *rx_mac_srng;
977 	QDF_STATUS status = QDF_STATUS_SUCCESS;
978 
979 	/*
980 	 * In Beryllium chipset msdu_start, mpdu_end
981 	 * and rx_attn are part of msdu_end/mpdu_start
982 	 */
983 	htt_tlv_filter.msdu_start = 0;
984 	htt_tlv_filter.mpdu_end = 0;
985 	htt_tlv_filter.attention = 0;
986 	htt_tlv_filter.mpdu_start = 1;
987 	htt_tlv_filter.msdu_end = 1;
988 	htt_tlv_filter.packet = 1;
989 	htt_tlv_filter.packet_header = 1;
990 
991 	htt_tlv_filter.ppdu_start = 0;
992 	htt_tlv_filter.ppdu_end = 0;
993 	htt_tlv_filter.ppdu_end_user_stats = 0;
994 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
995 	htt_tlv_filter.ppdu_end_status_done = 0;
996 	htt_tlv_filter.enable_fp = 1;
997 	htt_tlv_filter.enable_md = 0;
998 	htt_tlv_filter.enable_md = 0;
999 	htt_tlv_filter.enable_mo = 0;
1000 
1001 	htt_tlv_filter.fp_mgmt_filter = 0;
1002 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
1003 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
1004 					 FILTER_DATA_MCAST |
1005 					 FILTER_DATA_DATA);
1006 	htt_tlv_filter.mo_mgmt_filter = 0;
1007 	htt_tlv_filter.mo_ctrl_filter = 0;
1008 	htt_tlv_filter.mo_data_filter = 0;
1009 	htt_tlv_filter.md_data_filter = 0;
1010 
1011 	htt_tlv_filter.offset_valid = true;
1012 
1013 	/* Not subscribing to mpdu_end, msdu_start and rx_attn */
1014 	htt_tlv_filter.rx_mpdu_end_offset = 0;
1015 	htt_tlv_filter.rx_msdu_start_offset = 0;
1016 	htt_tlv_filter.rx_attn_offset = 0;
1017 
1018 	/*
1019 	 * For monitor mode, the packet hdr tlv is enabled later during
1020 	 * filter update
1021 	 */
1022 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1023 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)
1024 		htt_tlv_filter.rx_packet_offset = soc->rx_mon_pkt_tlv_size;
1025 	else
1026 		htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
1027 
1028 	htt_tlv_filter.rx_header_offset =
1029 				hal_rx_pkt_tlv_offset_get(soc->hal_soc);
1030 	htt_tlv_filter.rx_mpdu_start_offset =
1031 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
1032 	htt_tlv_filter.rx_msdu_end_offset =
1033 				hal_rx_msdu_end_offset_get(soc->hal_soc);
1034 
1035 	dp_info("TLV subscription\n"
1036 		"msdu_start %d, mpdu_end %d, attention %d"
1037 		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
1038 		"TLV offsets\n"
1039 		"msdu_start %d, mpdu_end %d, attention %d"
1040 		"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
1041 		htt_tlv_filter.msdu_start,
1042 		htt_tlv_filter.mpdu_end,
1043 		htt_tlv_filter.attention,
1044 		htt_tlv_filter.mpdu_start,
1045 		htt_tlv_filter.msdu_end,
1046 		htt_tlv_filter.packet_header,
1047 		htt_tlv_filter.packet,
1048 		htt_tlv_filter.rx_msdu_start_offset,
1049 		htt_tlv_filter.rx_mpdu_end_offset,
1050 		htt_tlv_filter.rx_attn_offset,
1051 		htt_tlv_filter.rx_mpdu_start_offset,
1052 		htt_tlv_filter.rx_msdu_end_offset,
1053 		htt_tlv_filter.rx_header_offset,
1054 		htt_tlv_filter.rx_packet_offset);
1055 
1056 	for (i = 0; i < MAX_PDEV_CNT; i++) {
1057 		struct dp_pdev *pdev = soc->pdev_list[i];
1058 
1059 		if (!pdev)
1060 			continue;
1061 
1062 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1063 			int mac_for_pdev =
1064 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
1065 			/*
1066 			 * Obtain lmac id from pdev to access the LMAC ring
1067 			 * in soc context
1068 			 */
1069 			int lmac_id =
1070 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
1071 							   pdev->pdev_id);
1072 
1073 			rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
1074 
1075 			if (!rx_mac_srng->hal_srng)
1076 				continue;
1077 
1078 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
1079 					    rx_mac_srng->hal_srng,
1080 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
1081 					    &htt_tlv_filter);
1082 		}
1083 	}
1084 	return status;
1085 
1086 }
1087 #endif
1088 
1089 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1090 /**
1091  * dp_service_near_full_srngs_be() - Main bottom half callback for the
1092  *				near-full IRQs.
1093  * @soc: Datapath SoC handle
1094  * @int_ctx: Interrupt context
1095  * @dp_budget: Budget of the work that can be done in the bottom half
1096  *
1097  * Return: work done in the handler
1098  */
1099 static uint32_t
1100 dp_service_near_full_srngs_be(struct dp_soc *soc, struct dp_intr *int_ctx,
1101 			      uint32_t dp_budget)
1102 {
1103 	int ring = 0;
1104 	int budget = dp_budget;
1105 	uint32_t work_done  = 0;
1106 	uint32_t remaining_quota = dp_budget;
1107 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1108 	int tx_ring_near_full_mask = int_ctx->tx_ring_near_full_mask;
1109 	int rx_near_full_grp_1_mask = int_ctx->rx_near_full_grp_1_mask;
1110 	int rx_near_full_grp_2_mask = int_ctx->rx_near_full_grp_2_mask;
1111 	int rx_near_full_mask = rx_near_full_grp_1_mask |
1112 				rx_near_full_grp_2_mask;
1113 
1114 	dp_verbose_debug("rx_ring_near_full 0x%x tx_ring_near_full 0x%x",
1115 			 rx_near_full_mask,
1116 			 tx_ring_near_full_mask);
1117 
1118 	if (rx_near_full_mask) {
1119 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1120 			if (!(rx_near_full_mask & (1 << ring)))
1121 				continue;
1122 
1123 			work_done = dp_rx_nf_process(int_ctx,
1124 					soc->reo_dest_ring[ring].hal_srng,
1125 					ring, remaining_quota);
1126 			if (work_done) {
1127 				intr_stats->num_rx_ring_near_full_masks[ring]++;
1128 				dp_verbose_debug("rx NF mask 0x%x ring %d, work_done %d budget %d",
1129 						 rx_near_full_mask, ring,
1130 						 work_done,
1131 						 budget);
1132 				budget -=  work_done;
1133 				if (budget <= 0)
1134 					goto budget_done;
1135 				remaining_quota = budget;
1136 			}
1137 		}
1138 	}
1139 
1140 	if (tx_ring_near_full_mask) {
1141 		for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
1142 			if (!(tx_ring_near_full_mask & (1 << ring)))
1143 				continue;
1144 
1145 			work_done = dp_tx_comp_nf_handler(int_ctx, soc,
1146 					soc->tx_comp_ring[ring].hal_srng,
1147 					ring, remaining_quota);
1148 			if (work_done) {
1149 				intr_stats->num_tx_comp_ring_near_full_masks[ring]++;
1150 				dp_verbose_debug("tx NF mask 0x%x ring %d, work_done %d budget %d",
1151 						 tx_ring_near_full_mask, ring,
1152 						 work_done, budget);
1153 				budget -=  work_done;
1154 				if (budget <= 0)
1155 					break;
1156 				remaining_quota = budget;
1157 			}
1158 		}
1159 	}
1160 
1161 	intr_stats->num_near_full_masks++;
1162 
1163 budget_done:
1164 	return dp_budget - budget;
1165 }
1166 
1167 /**
1168  * dp_srng_test_and_update_nf_params_be() - Check if the srng is in near full
1169  *				state and set the reap_limit appropriately
1170  *				as per the near full state
1171  * @soc: Datapath soc handle
1172  * @dp_srng: Datapath handle for SRNG
1173  * @max_reap_limit: [Output Buffer] Buffer to set the max reap limit as per
1174  *			the srng near-full state
1175  *
1176  * Return: 1, if the srng is in near-full state
1177  *	   0, if the srng is not in near-full state
1178  */
1179 static int
1180 dp_srng_test_and_update_nf_params_be(struct dp_soc *soc,
1181 				     struct dp_srng *dp_srng,
1182 				     int *max_reap_limit)
1183 {
1184 	return _dp_srng_test_and_update_nf_params(soc, dp_srng, max_reap_limit);
1185 }
1186 
1187 /**
1188  * dp_init_near_full_arch_ops_be() - Initialize the arch ops handler for the
1189  *			near full IRQ handling operations.
1190  * @arch_ops: arch ops handle
1191  *
1192  * Return: none
1193  */
1194 static inline void
1195 dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
1196 {
1197 	arch_ops->dp_service_near_full_srngs = dp_service_near_full_srngs_be;
1198 	arch_ops->dp_srng_test_and_update_nf_params =
1199 					dp_srng_test_and_update_nf_params_be;
1200 }
1201 
1202 #else
/* WLAN_FEATURE_NEAR_FULL_IRQ disabled: no near-full handlers to register */
static inline void
dp_init_near_full_arch_ops_be(struct dp_arch_ops *arch_ops)
{
}
1207 #endif
1208 
1209 #ifdef WLAN_SUPPORT_PPEDS
1210 static void dp_soc_ppe_srng_deinit(struct dp_soc *soc)
1211 {
1212 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1213 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1214 
1215 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1216 
1217 	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
1218 		return;
1219 
1220 	dp_srng_deinit(soc, &be_soc->ppe_release_ring, PPE_RELEASE, 0);
1221 	wlan_minidump_remove(be_soc->ppe_release_ring.base_vaddr_unaligned,
1222 			     be_soc->ppe_release_ring.alloc_size,
1223 			     soc->ctrl_psoc,
1224 			     WLAN_MD_DP_SRNG_PPE_RELEASE,
1225 			     "ppe_release_ring");
1226 
1227 	dp_srng_deinit(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0);
1228 	wlan_minidump_remove(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
1229 			     be_soc->ppe2tcl_ring.alloc_size,
1230 			     soc->ctrl_psoc,
1231 			     WLAN_MD_DP_SRNG_PPE2TCL,
1232 			     "ppe2tcl_ring");
1233 
1234 	dp_srng_deinit(soc, &be_soc->reo2ppe_ring, REO2PPE, 0);
1235 	wlan_minidump_remove(be_soc->reo2ppe_ring.base_vaddr_unaligned,
1236 			     be_soc->reo2ppe_ring.alloc_size,
1237 			     soc->ctrl_psoc,
1238 			     WLAN_MD_DP_SRNG_REO2PPE,
1239 			     "reo2ppe_ring");
1240 }
1241 
1242 static void dp_soc_ppe_srng_free(struct dp_soc *soc)
1243 {
1244 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1245 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1246 
1247 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1248 
1249 	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
1250 		return;
1251 
1252 	dp_srng_free(soc, &be_soc->ppe_release_ring);
1253 
1254 	dp_srng_free(soc, &be_soc->ppe2tcl_ring);
1255 
1256 	dp_srng_free(soc, &be_soc->reo2ppe_ring);
1257 }
1258 
1259 static QDF_STATUS dp_soc_ppe_srng_alloc(struct dp_soc *soc)
1260 {
1261 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1262 	uint32_t entries;
1263 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1264 
1265 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1266 
1267 	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
1268 		return QDF_STATUS_SUCCESS;
1269 
1270 	entries = wlan_cfg_get_dp_soc_reo2ppe_ring_size(soc_cfg_ctx);
1271 
1272 	if (dp_srng_alloc(soc, &be_soc->reo2ppe_ring, REO2PPE,
1273 			  entries, 0)) {
1274 		dp_err("%pK: dp_srng_alloc failed for reo2ppe", soc);
1275 		goto fail;
1276 	}
1277 
1278 	entries = wlan_cfg_get_dp_soc_ppe2tcl_ring_size(soc_cfg_ctx);
1279 	if (dp_srng_alloc(soc, &be_soc->ppe2tcl_ring, PPE2TCL,
1280 			  entries, 0)) {
1281 		dp_err("%pK: dp_srng_alloc failed for ppe2tcl_ring", soc);
1282 		goto fail;
1283 	}
1284 
1285 	entries = wlan_cfg_get_dp_soc_ppe_release_ring_size(soc_cfg_ctx);
1286 	if (dp_srng_alloc(soc, &be_soc->ppe_release_ring, PPE_RELEASE,
1287 			  entries, 0)) {
1288 		dp_err("%pK: dp_srng_alloc failed for ppe_release_ring", soc);
1289 		goto fail;
1290 	}
1291 
1292 	return QDF_STATUS_SUCCESS;
1293 fail:
1294 	dp_soc_ppe_srng_free(soc);
1295 	return QDF_STATUS_E_NOMEM;
1296 }
1297 
1298 static QDF_STATUS dp_soc_ppe_srng_init(struct dp_soc *soc)
1299 {
1300 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1301 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1302 	hal_soc_handle_t hal_soc = soc->hal_soc;
1303 
1304 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1305 
1306 	if (!wlan_cfg_get_dp_soc_is_ppe_enabled(soc_cfg_ctx))
1307 		return QDF_STATUS_SUCCESS;
1308 
1309 	if (dp_srng_init(soc, &be_soc->reo2ppe_ring, REO2PPE, 0, 0)) {
1310 		dp_err("%pK: dp_srng_init failed for reo2ppe", soc);
1311 		goto fail;
1312 	}
1313 
1314 	wlan_minidump_log(be_soc->reo2ppe_ring.base_vaddr_unaligned,
1315 			  be_soc->reo2ppe_ring.alloc_size,
1316 			  soc->ctrl_psoc,
1317 			  WLAN_MD_DP_SRNG_REO2PPE,
1318 			  "reo2ppe_ring");
1319 
1320 	hal_reo_config_reo2ppe_dest_info(hal_soc);
1321 
1322 	if (dp_srng_init(soc, &be_soc->ppe2tcl_ring, PPE2TCL, 0, 0)) {
1323 		dp_err("%pK: dp_srng_init failed for ppe2tcl_ring", soc);
1324 		goto fail;
1325 	}
1326 
1327 	wlan_minidump_log(be_soc->ppe2tcl_ring.base_vaddr_unaligned,
1328 			  be_soc->ppe2tcl_ring.alloc_size,
1329 			  soc->ctrl_psoc,
1330 			  WLAN_MD_DP_SRNG_PPE2TCL,
1331 			  "ppe2tcl_ring");
1332 
1333 	if (dp_srng_init(soc, &be_soc->ppe_release_ring, PPE_RELEASE, 0, 0)) {
1334 		dp_err("%pK: dp_srng_init failed for ppe_release_ring", soc);
1335 		goto fail;
1336 	}
1337 
1338 	wlan_minidump_log(be_soc->ppe_release_ring.base_vaddr_unaligned,
1339 			  be_soc->ppe_release_ring.alloc_size,
1340 			  soc->ctrl_psoc,
1341 			  WLAN_MD_DP_SRNG_PPE_RELEASE,
1342 			  "ppe_release_ring");
1343 #ifdef WLAN_SUPPORT_PPEDS
1344 	if (dp_ppeds_register_soc_be(be_soc)) {
1345 		dp_err("%pK: ppeds registration failed", soc);
1346 		goto fail;
1347 	}
1348 #endif
1349 
1350 	return QDF_STATUS_SUCCESS;
1351 fail:
1352 	dp_soc_ppe_srng_deinit(soc);
1353 	return QDF_STATUS_E_NOMEM;
1354 }
1355 #else
/* WLAN_SUPPORT_PPEDS disabled: no PPE rings to deinitialize */
static void dp_soc_ppe_srng_deinit(struct dp_soc *soc)
{
}
1359 
/* WLAN_SUPPORT_PPEDS disabled: no PPE rings to free */
static void dp_soc_ppe_srng_free(struct dp_soc *soc)
{
}
1363 
/* WLAN_SUPPORT_PPEDS disabled: nothing to allocate, report success */
static QDF_STATUS dp_soc_ppe_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
1368 
/* WLAN_SUPPORT_PPEDS disabled: nothing to initialize, report success */
static QDF_STATUS dp_soc_ppe_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
1373 #endif
1374 
1375 static void dp_soc_srng_deinit_be(struct dp_soc *soc)
1376 {
1377 	uint32_t i;
1378 
1379 	dp_soc_ppe_srng_deinit(soc);
1380 
1381 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1382 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
1383 			dp_srng_deinit(soc, &soc->rx_refill_buf_ring[i],
1384 				       RXDMA_BUF, 0);
1385 		}
1386 	}
1387 }
1388 
1389 static void dp_soc_srng_free_be(struct dp_soc *soc)
1390 {
1391 	uint32_t i;
1392 
1393 	dp_soc_ppe_srng_free(soc);
1394 
1395 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1396 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++)
1397 			dp_srng_free(soc, &soc->rx_refill_buf_ring[i]);
1398 	}
1399 }
1400 
1401 static QDF_STATUS dp_soc_srng_alloc_be(struct dp_soc *soc)
1402 {
1403 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1404 	uint32_t ring_size;
1405 	uint32_t i;
1406 
1407 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1408 
1409 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
1410 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1411 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
1412 			if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[i],
1413 					  RXDMA_BUF, ring_size, 0)) {
1414 				dp_err("%pK: dp_srng_alloc failed refill ring",
1415 				       soc);
1416 				goto fail;
1417 			}
1418 		}
1419 	}
1420 
1421 	if (dp_soc_ppe_srng_alloc(soc)) {
1422 		dp_err("%pK: ppe rings alloc failed",
1423 		       soc);
1424 		goto fail;
1425 	}
1426 
1427 	return QDF_STATUS_SUCCESS;
1428 fail:
1429 	dp_soc_srng_free_be(soc);
1430 	return QDF_STATUS_E_NOMEM;
1431 }
1432 
1433 static QDF_STATUS dp_soc_srng_init_be(struct dp_soc *soc)
1434 {
1435 	int i = 0;
1436 
1437 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1438 		for (i = 0; i < soc->num_rx_refill_buf_rings; i++) {
1439 			if (dp_srng_init(soc, &soc->rx_refill_buf_ring[i],
1440 					 RXDMA_BUF, 0, 0)) {
1441 				dp_err("%pK: dp_srng_init failed refill ring",
1442 				       soc);
1443 				goto fail;
1444 			}
1445 		}
1446 	}
1447 
1448 	if (dp_soc_ppe_srng_init(soc)) {
1449 		dp_err("%pK: ppe rings init failed",
1450 		       soc);
1451 		goto fail;
1452 	}
1453 
1454 	return QDF_STATUS_SUCCESS;
1455 fail:
1456 	dp_soc_srng_deinit_be(soc);
1457 	return QDF_STATUS_E_NOMEM;
1458 }
1459 
1460 #ifdef WLAN_FEATURE_11BE_MLO
1461 static inline unsigned
1462 dp_mlo_peer_find_hash_index(dp_mld_peer_hash_obj_t mld_hash_obj,
1463 			    union dp_align_mac_addr *mac_addr)
1464 {
1465 	uint32_t index;
1466 
1467 	index =
1468 		mac_addr->align2.bytes_ab ^
1469 		mac_addr->align2.bytes_cd ^
1470 		mac_addr->align2.bytes_ef;
1471 
1472 	index ^= index >> mld_hash_obj->mld_peer_hash.idx_bits;
1473 	index &= mld_hash_obj->mld_peer_hash.mask;
1474 
1475 	return index;
1476 }
1477 
1478 QDF_STATUS
1479 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
1480 				int hash_elems)
1481 {
1482 	int i, log2;
1483 
1484 	if (!mld_hash_obj)
1485 		return QDF_STATUS_E_FAILURE;
1486 
1487 	hash_elems *= DP_PEER_HASH_LOAD_MULT;
1488 	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
1489 	log2 = dp_log2_ceil(hash_elems);
1490 	hash_elems = 1 << log2;
1491 
1492 	mld_hash_obj->mld_peer_hash.mask = hash_elems - 1;
1493 	mld_hash_obj->mld_peer_hash.idx_bits = log2;
1494 	/* allocate an array of TAILQ peer object lists */
1495 	mld_hash_obj->mld_peer_hash.bins = qdf_mem_malloc(
1496 		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
1497 	if (!mld_hash_obj->mld_peer_hash.bins)
1498 		return QDF_STATUS_E_NOMEM;
1499 
1500 	for (i = 0; i < hash_elems; i++)
1501 		TAILQ_INIT(&mld_hash_obj->mld_peer_hash.bins[i]);
1502 
1503 	qdf_spinlock_create(&mld_hash_obj->mld_peer_hash_lock);
1504 
1505 	return QDF_STATUS_SUCCESS;
1506 }
1507 
1508 void
1509 dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj)
1510 {
1511 	if (!mld_hash_obj)
1512 		return;
1513 
1514 	if (mld_hash_obj->mld_peer_hash.bins) {
1515 		qdf_mem_free(mld_hash_obj->mld_peer_hash.bins);
1516 		mld_hash_obj->mld_peer_hash.bins = NULL;
1517 		qdf_spinlock_destroy(&mld_hash_obj->mld_peer_hash_lock);
1518 	}
1519 }
1520 
1521 #ifdef WLAN_MLO_MULTI_CHIP
static QDF_STATUS dp_mlo_peer_find_hash_attach_wrapper(struct dp_soc *soc)
{
	/* With WLAN_MLO_MULTI_CHIP the MLD peer hash table belongs to the
	 * MLO global object and is attached when that object is created,
	 * so the per-SoC attach path intentionally does nothing.
	 */
	return QDF_STATUS_SUCCESS;
}
1529 
/* Multi-chip MLO: hash table is owned by the MLO global object, so the
 * per-SoC detach is a no-op (mirrors the attach wrapper above).
 */
static void dp_mlo_peer_find_hash_detach_wrapper(struct dp_soc *soc)
{
}
1533 #else
1534 static QDF_STATUS dp_mlo_peer_find_hash_attach_wrapper(struct dp_soc *soc)
1535 {
1536 	dp_mld_peer_hash_obj_t mld_hash_obj;
1537 
1538 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
1539 
1540 	if (!mld_hash_obj)
1541 		return QDF_STATUS_E_FAILURE;
1542 
1543 	return dp_mlo_peer_find_hash_attach_be(mld_hash_obj, soc->max_peers);
1544 }
1545 
1546 static void dp_mlo_peer_find_hash_detach_wrapper(struct dp_soc *soc)
1547 {
1548 	dp_mld_peer_hash_obj_t mld_hash_obj;
1549 
1550 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
1551 
1552 	if (!mld_hash_obj)
1553 		return;
1554 
1555 	return dp_mlo_peer_find_hash_detach_be(mld_hash_obj);
1556 }
1557 #endif
1558 
/**
 * dp_mlo_peer_find_hash_find_be() - Look up an MLD peer by MAC address
 * @soc: datapath soc handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: non-zero if @peer_mac_addr may be cast directly
 *			 to union dp_align_mac_addr
 * @mod_id: module id taking the peer/vdev references
 * @vdev_id: restrict the match to peers whose vdev shares the MLD MAC of
 *	     this vdev, or DP_VDEV_ALL for no vdev restriction
 *
 * On a match a reference is taken on the peer with @mod_id before it is
 * returned; the caller must release it. Any vdev reference taken
 * internally is released on every path.
 *
 * Return: referenced peer on success, NULL if not found / on any failure
 */
static struct dp_peer *
dp_mlo_peer_find_hash_find_be(struct dp_soc *soc,
			      uint8_t *peer_mac_addr,
			      int mac_addr_is_aligned,
			      enum dp_mod_id mod_id,
			      uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	dp_mld_peer_hash_obj_t mld_hash_obj;

	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
	if (!mld_hash_obj)
		return NULL;

	/* Table not attached (or already detached) */
	if (!mld_hash_obj->mld_peer_hash.bins)
		return NULL;

	/* Copy into an aligned buffer unless the caller guarantees
	 * alignment already.
	 */
	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}

	/* Hold a vdev reference for the MLD-MAC comparison below */
	if (vdev_id != DP_VDEV_ALL) {
		vdev = dp_vdev_get_ref_by_id(soc, vdev_id, mod_id);
		if (!vdev) {
			dp_err("vdev is null\n");
			return NULL;
		}
	} else {
		vdev = NULL;
	}
	/* search mld peer table if no link peer for given mac address */
	index = dp_mlo_peer_find_hash_index(mld_hash_obj, mac_addr);
	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
	TAILQ_FOREACH(peer, &mld_hash_obj->mld_peer_hash.bins[index],
		      hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
			/* vdev can only be NULL when vdev_id == DP_VDEV_ALL,
			 * so the short-circuit below never dereferences a
			 * NULL vdev.
			 */
			if ((vdev_id == DP_VDEV_ALL) || (
				dp_peer_find_mac_addr_cmp(
						&peer->vdev->mld_mac_addr,
						&vdev->mld_mac_addr) == 0)) {
				/* take peer reference before returning */
				if (dp_peer_get_ref(NULL, peer, mod_id) !=
						QDF_STATUS_SUCCESS)
					peer = NULL;

				if (vdev)
					dp_vdev_unref_delete(soc, vdev, mod_id);

				qdf_spin_unlock_bh(
					&mld_hash_obj->mld_peer_hash_lock);
				return peer;
			}
		}
	}

	if (vdev)
		dp_vdev_unref_delete(soc, vdev, mod_id);

	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);

	return NULL; /* failure */
}
1629 
1630 static void
1631 dp_mlo_peer_find_hash_remove_be(struct dp_soc *soc, struct dp_peer *peer)
1632 {
1633 	uint32_t index;
1634 	struct dp_peer *tmppeer = NULL;
1635 	int found = 0;
1636 	dp_mld_peer_hash_obj_t mld_hash_obj;
1637 
1638 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
1639 
1640 	if (!mld_hash_obj)
1641 		return;
1642 
1643 	index = dp_mlo_peer_find_hash_index(mld_hash_obj, &peer->mac_addr);
1644 	QDF_ASSERT(!TAILQ_EMPTY(&mld_hash_obj->mld_peer_hash.bins[index]));
1645 
1646 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
1647 	TAILQ_FOREACH(tmppeer, &mld_hash_obj->mld_peer_hash.bins[index],
1648 		      hash_list_elem) {
1649 		if (tmppeer == peer) {
1650 			found = 1;
1651 			break;
1652 		}
1653 	}
1654 	QDF_ASSERT(found);
1655 	TAILQ_REMOVE(&mld_hash_obj->mld_peer_hash.bins[index], peer,
1656 		     hash_list_elem);
1657 
1658 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
1659 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
1660 }
1661 
1662 static void
1663 dp_mlo_peer_find_hash_add_be(struct dp_soc *soc, struct dp_peer *peer)
1664 {
1665 	uint32_t index;
1666 	dp_mld_peer_hash_obj_t mld_hash_obj;
1667 
1668 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
1669 
1670 	if (!mld_hash_obj)
1671 		return;
1672 
1673 	index = dp_mlo_peer_find_hash_index(mld_hash_obj, &peer->mac_addr);
1674 
1675 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
1676 
1677 	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(NULL, peer,
1678 						DP_MOD_ID_CONFIG))) {
1679 		dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
1680 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1681 		qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
1682 		return;
1683 	}
1684 	TAILQ_INSERT_TAIL(&mld_hash_obj->mld_peer_hash.bins[index], peer,
1685 			  hash_list_elem);
1686 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
1687 }
1688 
1689 void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
1690 {
1691 	uint32_t index;
1692 	struct dp_peer *peer;
1693 	dp_mld_peer_hash_obj_t mld_hash_obj;
1694 
1695 	mld_hash_obj = dp_mlo_get_peer_hash_obj(soc);
1696 
1697 	if (!mld_hash_obj)
1698 		return;
1699 
1700 	qdf_spin_lock_bh(&mld_hash_obj->mld_peer_hash_lock);
1701 	for (index = 0; index < mld_hash_obj->mld_peer_hash.mask; index++) {
1702 		TAILQ_FOREACH(peer, &mld_hash_obj->mld_peer_hash.bins[index],
1703 			      hash_list_elem) {
1704 			dp_print_peer_ast_entries(soc, peer, NULL);
1705 		}
1706 	}
1707 	qdf_spin_unlock_bh(&mld_hash_obj->mld_peer_hash_lock);
1708 }
1709 
1710 #endif
1711 
1712 #if defined(DP_UMAC_HW_HARD_RESET) && defined(DP_UMAC_HW_RESET_SUPPORT)
1713 static void dp_reconfig_tx_vdev_mcast_ctrl_be(struct dp_soc *soc,
1714 					      struct dp_vdev *vdev)
1715 {
1716 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1717 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1718 	hal_soc_handle_t hal_soc = soc->hal_soc;
1719 	uint8_t vdev_id = vdev->vdev_id;
1720 
1721 	if (vdev->opmode == wlan_op_mode_sta) {
1722 		if (vdev->pdev->isolation)
1723 			hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1724 						HAL_TX_MCAST_CTRL_FW_EXCEPTION);
1725 		else
1726 			hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1727 						HAL_TX_MCAST_CTRL_MEC_NOTIFY);
1728 	} else if (vdev->opmode == wlan_op_mode_ap) {
1729 		if (vdev->mlo_vdev) {
1730 			if (be_vdev->mcast_primary) {
1731 				hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1732 					   HAL_TX_MCAST_CTRL_NO_SPECIAL);
1733 				hal_tx_vdev_mcast_ctrl_set(hal_soc,
1734 						vdev_id + 128,
1735 						HAL_TX_MCAST_CTRL_FW_EXCEPTION);
1736 				dp_mcast_mlo_iter_ptnr_soc(be_soc,
1737 					dp_tx_mcast_mlo_reinject_routing_set,
1738 					(void *)&be_vdev->mcast_primary);
1739 			} else {
1740 				hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1741 							HAL_TX_MCAST_CTRL_DROP);
1742 			}
1743 		} else {
1744 			hal_tx_vdev_mcast_ctrl_set(vdev->pdev->soc->hal_soc,
1745 						   vdev_id,
1746 						   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
1747 		}
1748 	}
1749 }
1750 
1751 static void dp_bank_reconfig_be(struct dp_soc *soc, struct dp_vdev *vdev)
1752 {
1753 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1754 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1755 	union hal_tx_bank_config *bank_config;
1756 
1757 	if (!be_vdev || be_vdev->bank_id == DP_BE_INVALID_BANK_ID)
1758 		return;
1759 
1760 	bank_config = &be_soc->bank_profiles[be_vdev->bank_id].bank_config;
1761 
1762 	hal_tx_populate_bank_register(be_soc->soc.hal_soc, bank_config,
1763 				      be_vdev->bank_id);
1764 }
1765 
1766 #endif
1767 
1768 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1769 	defined(WLAN_MCAST_MLO)
1770 static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
1771 					struct dp_vdev_be *be_vdev,
1772 					cdp_config_param_type val)
1773 {
1774 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(
1775 						be_vdev->vdev.pdev->soc);
1776 	hal_soc_handle_t hal_soc = be_vdev->vdev.pdev->soc->hal_soc;
1777 	uint8_t vdev_id = be_vdev->vdev.vdev_id;
1778 
1779 	be_vdev->mcast_primary = val.cdp_vdev_param_mcast_vdev;
1780 
1781 	if (be_vdev->mcast_primary) {
1782 		hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1783 					   HAL_TX_MCAST_CTRL_NO_SPECIAL);
1784 		hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id + 128,
1785 					   HAL_TX_MCAST_CTRL_FW_EXCEPTION);
1786 		dp_mcast_mlo_iter_ptnr_soc(be_soc,
1787 					   dp_tx_mcast_mlo_reinject_routing_set,
1788 					   (void *)&be_vdev->mcast_primary);
1789 	} else {
1790 		hal_tx_vdev_mcast_ctrl_set(hal_soc, vdev_id,
1791 					   HAL_TX_MCAST_CTRL_DROP);
1792 	}
1793 }
1794 #else
/* MLO mcast (or multi-chip) support disabled: CDP_SET_MCAST_VDEV is a
 * no-op in this build.
 */
static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
					struct dp_vdev_be *be_vdev,
					cdp_config_param_type val)
{
}
1800 #endif
1801 
1802 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
/**
 * dp_tx_implicit_rbm_set_be() - Program the RBM mapping of a TCL data ring
 * @soc: DP soc handle
 * @tx_ring_id: index into soc->tcl_data_ring
 * @bm_id: buffer manager id to map the ring to
 */
static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
	hal_tx_config_rbm_mapping_be(soc->hal_soc,
				     soc->tcl_data_ring[tx_ring_id].hal_srng,
				     bm_id);
}
1811 #else
/* DP_TX_IMPLICIT_RBM_MAPPING disabled: RBM programming is a no-op */
static void dp_tx_implicit_rbm_set_be(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}
1817 #endif
1818 
1819 QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
1820 				     struct dp_vdev *vdev,
1821 				     enum cdp_vdev_param_type param,
1822 				     cdp_config_param_type val)
1823 {
1824 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1825 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1826 
1827 	switch (param) {
1828 	case CDP_TX_ENCAP_TYPE:
1829 	case CDP_UPDATE_DSCP_TO_TID_MAP:
1830 	case CDP_UPDATE_TDLS_FLAGS:
1831 		dp_tx_update_bank_profile(be_soc, be_vdev);
1832 		break;
1833 	case CDP_ENABLE_CIPHER:
1834 		if (vdev->tx_encap_type == htt_cmn_pkt_type_raw)
1835 			dp_tx_update_bank_profile(be_soc, be_vdev);
1836 		break;
1837 	case CDP_SET_MCAST_VDEV:
1838 		dp_txrx_set_mlo_mcast_primary_vdev_param_be(be_vdev, val);
1839 		break;
1840 	default:
1841 		dp_warn("invalid param %d", param);
1842 		break;
1843 	}
1844 
1845 	return QDF_STATUS_SUCCESS;
1846 }
1847 
1848 #ifdef WLAN_FEATURE_11BE_MLO
1849 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
1850 static inline void
1851 dp_soc_max_peer_id_set(struct dp_soc *soc)
1852 {
1853 	soc->peer_id_shift = dp_log2_ceil(soc->max_peers);
1854 	soc->peer_id_mask = (1 << soc->peer_id_shift) - 1;
1855 	/*
1856 	 * Double the peers since we use ML indication bit
1857 	 * alongwith peer_id to find peers.
1858 	 */
1859 	soc->max_peer_id = 1 << (soc->peer_id_shift + 1);
1860 }
1861 #else
/* Full-width peer-id field: cap max_peer_id at the largest value
 * representable including the ML-peer-valid bit.
 */
static inline void
dp_soc_max_peer_id_set(struct dp_soc *soc)
{
	soc->max_peer_id =
		(1 << (HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S + 1)) - 1;
}
1868 #endif /* DP_USE_REDUCED_PEER_ID_FIELD_WIDTH */
1869 #else
/* Without 11BE MLO there is no ML indication bit; peer ids run up to
 * max_peers.
 */
static inline void
dp_soc_max_peer_id_set(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;
}
1875 #endif /* WLAN_FEATURE_11BE_MLO */
1876 
1877 static void dp_peer_map_detach_be(struct dp_soc *soc)
1878 {
1879 	if (soc->host_ast_db_enable)
1880 		dp_peer_ast_hash_detach(soc);
1881 }
1882 
1883 static QDF_STATUS dp_peer_map_attach_be(struct dp_soc *soc)
1884 {
1885 	QDF_STATUS status;
1886 
1887 	if (soc->host_ast_db_enable) {
1888 		status = dp_peer_ast_hash_attach(soc);
1889 		if (QDF_IS_STATUS_ERROR(status))
1890 			return status;
1891 	}
1892 
1893 	dp_soc_max_peer_id_set(soc);
1894 
1895 	return QDF_STATUS_SUCCESS;
1896 }
1897 
1898 static struct dp_peer *dp_find_peer_by_destmac_be(struct dp_soc *soc,
1899 						  uint8_t *dest_mac,
1900 						  uint8_t vdev_id)
1901 {
1902 	struct dp_peer *peer = NULL;
1903 	struct dp_peer *tgt_peer = NULL;
1904 	struct dp_ast_entry *ast_entry = NULL;
1905 	uint16_t peer_id;
1906 
1907 	qdf_spin_lock_bh(&soc->ast_lock);
1908 	ast_entry = dp_peer_ast_hash_find_soc(soc, dest_mac);
1909 	if (!ast_entry) {
1910 		qdf_spin_unlock_bh(&soc->ast_lock);
1911 		dp_err("NULL ast entry");
1912 		return NULL;
1913 	}
1914 
1915 	peer_id = ast_entry->peer_id;
1916 	qdf_spin_unlock_bh(&soc->ast_lock);
1917 
1918 	if (peer_id == HTT_INVALID_PEER)
1919 		return NULL;
1920 
1921 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_SAWF);
1922 	if (!peer) {
1923 		dp_err("NULL peer for peer_id:%d", peer_id);
1924 		return NULL;
1925 	}
1926 
1927 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
1928 
1929 	/*
1930 	 * Once tgt_peer is obtained,
1931 	 * release the ref taken for original peer.
1932 	 */
1933 	dp_peer_get_ref(NULL, tgt_peer, DP_MOD_ID_SAWF);
1934 	dp_peer_unref_delete(peer, DP_MOD_ID_SAWF);
1935 
1936 	return tgt_peer;
1937 }
1938 
1939 #ifdef WLAN_FEATURE_11BE_MLO
1940 #ifdef WLAN_MCAST_MLO
/* Register the MLO multicast TX/RX handlers in the arch ops table */
static inline void
dp_initialize_arch_ops_be_mcast_mlo(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_tx_mcast_handler = dp_tx_mlo_mcast_handler_be;
	arch_ops->dp_rx_mcast_handler = dp_rx_mlo_igmp_handler;
}
1947 #else /* WLAN_MCAST_MLO */
/* WLAN_MCAST_MLO disabled: leave the mcast handlers unset */
static inline void
dp_initialize_arch_ops_be_mcast_mlo(struct dp_arch_ops *arch_ops)
{
}
1952 #endif /* WLAN_MCAST_MLO */
1953 
1954 #ifdef WLAN_MLO_MULTI_CHIP
/* Register the multi-chip MLO partner map/unmap callbacks */
static inline void
dp_initialize_arch_ops_be_mlo_ptnr_chip(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_partner_chips_map = dp_mlo_partner_chips_map;
	arch_ops->dp_partner_chips_unmap = dp_mlo_partner_chips_unmap;
}
1961 #else
/* WLAN_MLO_MULTI_CHIP disabled: no partner-chip callbacks to register */
static inline void
dp_initialize_arch_ops_be_mlo_ptnr_chip(struct dp_arch_ops *arch_ops)
{
}
1966 #endif
1967 
1968 static inline void
1969 dp_initialize_arch_ops_be_mlo(struct dp_arch_ops *arch_ops)
1970 {
1971 	dp_initialize_arch_ops_be_mcast_mlo(arch_ops);
1972 	dp_initialize_arch_ops_be_mlo_ptnr_chip(arch_ops);
1973 	arch_ops->mlo_peer_find_hash_detach =
1974 	dp_mlo_peer_find_hash_detach_wrapper;
1975 	arch_ops->mlo_peer_find_hash_attach =
1976 	dp_mlo_peer_find_hash_attach_wrapper;
1977 	arch_ops->mlo_peer_find_hash_add = dp_mlo_peer_find_hash_add_be;
1978 	arch_ops->mlo_peer_find_hash_remove = dp_mlo_peer_find_hash_remove_be;
1979 	arch_ops->mlo_peer_find_hash_find = dp_mlo_peer_find_hash_find_be;
1980 }
1981 #else /* WLAN_FEATURE_11BE_MLO */
/* WLAN_FEATURE_11BE_MLO disabled: no MLO arch ops to register */
static inline void
dp_initialize_arch_ops_be_mlo(struct dp_arch_ops *arch_ops)
{
}
1986 #endif /* WLAN_FEATURE_11BE_MLO */
1987 
1988 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
1989 #define DP_LMAC_PEER_ID_MSB_LEGACY 2
1990 #define DP_LMAC_PEER_ID_MSB_MLO 3
1991 
1992 static void dp_peer_get_reo_hash_be(struct dp_vdev *vdev,
1993 				    struct cdp_peer_setup_info *setup_info,
1994 				    enum cdp_host_reo_dest_ring *reo_dest,
1995 				    bool *hash_based,
1996 				    uint8_t *lmac_peer_id_msb)
1997 {
1998 	struct dp_soc *soc = vdev->pdev->soc;
1999 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2000 
2001 	if (!be_soc->mlo_enabled)
2002 		return dp_vdev_get_default_reo_hash(vdev, reo_dest,
2003 						    hash_based);
2004 
2005 	*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
2006 	*reo_dest = vdev->pdev->reo_dest;
2007 
2008 	/* Not a ML link peer use non-mlo */
2009 	if (!setup_info) {
2010 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_LEGACY;
2011 		return;
2012 	}
2013 
2014 	/* For STA ML VAP we do not have num links info at this point
2015 	 * use MLO case always
2016 	 */
2017 	if (vdev->opmode == wlan_op_mode_sta) {
2018 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_MLO;
2019 		return;
2020 	}
2021 
2022 	/* For AP ML VAP consider the peer as ML only it associates with
2023 	 * multiple links
2024 	 */
2025 	if (setup_info->num_links == 1) {
2026 		*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_LEGACY;
2027 		return;
2028 	}
2029 
2030 	*lmac_peer_id_msb = DP_LMAC_PEER_ID_MSB_MLO;
2031 }
2032 
2033 static bool dp_reo_remap_config_be(struct dp_soc *soc,
2034 				   uint32_t *remap0,
2035 				   uint32_t *remap1,
2036 				   uint32_t *remap2)
2037 {
2038 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2039 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
2040 	uint32_t reo_mlo_config =
2041 		wlan_cfg_mlo_rx_ring_map_get(soc->wlan_cfg_ctx);
2042 
2043 	if (!be_soc->mlo_enabled)
2044 		return dp_reo_remap_config(soc, remap0, remap1, remap2);
2045 
2046 	*remap0 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_mlo_config);
2047 	*remap1 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_config);
2048 	*remap2 = hal_reo_ix_remap_value_get_be(soc->hal_soc, reo_mlo_config);
2049 
2050 	return true;
2051 }
2052 #else
/* Non multi-chip-MLO build: fall back to the default reo dest/hash
 * selection.
 * NOTE(review): lmac_peer_id_msb is left untouched on this path —
 * presumably the caller pre-initializes it; confirm.
 */
static void dp_peer_get_reo_hash_be(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
2061 
/**
 * dp_reo_remap_config_be() - Non-MLO variant of the REO remap config;
 *			      delegates to the common dp_reo_remap_config()
 * @soc: DP soc handle
 * @remap0: remap value for REO remap register 0, filled by the helper
 * @remap1: remap value for REO remap register 1, filled by the helper
 * @remap2: remap value for REO remap register 2, filled by the helper
 *
 * Return: result of dp_reo_remap_config()
 */
static bool dp_reo_remap_config_be(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}
2069 #endif
2070 
2071 #ifdef IPA_OFFLOAD
2072 static int8_t dp_ipa_get_bank_id_be(struct dp_soc *soc)
2073 {
2074 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
2075 
2076 	return be_soc->ipa_bank_id;
2077 }
2078 
/* Register the BE-specific IPA bank-id getter when IPA offload is built in */
static inline void dp_initialize_arch_ops_be_ipa(struct dp_arch_ops *arch_ops)
{
	arch_ops->ipa_get_bank_id = dp_ipa_get_bank_id_be;
}
2083 #else /* !IPA_OFFLOAD */
/* IPA offload compiled out: no IPA-specific arch ops to register */
static inline void dp_initialize_arch_ops_be_ipa(struct dp_arch_ops *arch_ops)
{
}
2087 #endif /* IPA_OFFLOAD */
2088 
/**
 * dp_initialize_arch_ops_be() - Populate the arch ops table with the
 *				 BE (beryllium) specific implementations
 * @arch_ops: architecture ops table to fill in
 *
 * Each assignment routes a generic DP entry point to its BE variant.
 * Feature-conditional ops are registered (or explicitly NULLed) under
 * the corresponding build flags.
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	/* Host-mode TX/RX fast paths and descriptor pool management */
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
	arch_ops->dp_rx_process = dp_rx_process_be;
	arch_ops->dp_tx_send_fast = dp_tx_fast_send_be;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_be;
	arch_ops->dp_tx_process_htt_completion =
				dp_tx_process_htt_completion_be;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
				dp_wbm_get_rx_desc_from_hal_desc_be;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_be;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_be;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_be;
#ifdef WIFI_MONITOR_SUPPORT
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_be;
#endif
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_be;
	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_be;
	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_be;

	/* soc/pdev/vdev/peer attach-detach lifecycle and SRNG setup */
	arch_ops->txrx_soc_attach = dp_soc_attach_be;
	arch_ops->txrx_soc_detach = dp_soc_detach_be;
	arch_ops->txrx_soc_init = dp_soc_init_be;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_be;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_be;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_be;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_be;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_be;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_be;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_be;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_be;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_be;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_be;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_be;
	/* MLO-related ops (no-ops when MLO support is compiled out) */
	dp_initialize_arch_ops_be_mlo(arch_ops);
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_be;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_be;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_be;
#if defined(DP_UMAC_HW_HARD_RESET) && defined(DP_UMAC_HW_RESET_SUPPORT)
	/* UMAC hard-reset recovery hooks */
	arch_ops->dp_bank_reconfig = dp_bank_reconfig_be;
	arch_ops->dp_reconfig_tx_vdev_mcast_ctrl =
					dp_reconfig_tx_vdev_mcast_ctrl_be;
	arch_ops->dp_cc_reg_cfg_init = dp_cc_reg_cfg_init;
#endif

#ifdef WLAN_SUPPORT_PPEDS
	arch_ops->dp_txrx_ppeds_rings_status = dp_ppeds_rings_status;
	arch_ops->txrx_soc_ppeds_start = dp_ppeds_start_soc_be;
	arch_ops->txrx_soc_ppeds_stop = dp_ppeds_stop_soc_be;
#else
	/* PPE-DS not supported: NULL so callers can feature-check the ops */
	arch_ops->dp_txrx_ppeds_rings_status = NULL;
	arch_ops->txrx_soc_ppeds_start = NULL;
	arch_ops->txrx_soc_ppeds_stop = NULL;
#endif

	dp_init_near_full_arch_ops_be(arch_ops);
	arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_be;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_be;
	arch_ops->print_mlo_ast_stats = dp_print_mlo_ast_stats_be;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_be;
	arch_ops->reo_remap_config = dp_reo_remap_config_be;
	dp_initialize_arch_ops_be_ipa(arch_ops);
}
2167