xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c (revision 3efaabd70475270fea7fcc46621defb016797d6e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 #include "qdf_ssr_driver_dump.h"
60 
#ifdef WLAN_FEATURE_STATS_EXT
/* Create/destroy the dp_soc rx_hw_stats spinlock; compiled to no-ops when
 * WLAN_FEATURE_STATS_EXT is disabled.
 */
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif
70 
/* Forward declarations of the TX ring-pair lifecycle helpers; they are
 * static, so their definitions live later in this file.
 */
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index);
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index);
77 
/* default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Eight consecutive DSCP values share one TID, i.e. the upper three DSCP
 * bits select the TID:
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
100 
/* default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity map: the 3-bit 802.1p PCP value is used directly as the TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
116 
/* dp_cpu_ring_map - ring-to-CPU assignment tables.
 * Each row is one map type (DP_NSS_CPU_RING_MAP_MAX rows, the last row
 * only when TX packet-capture enhancement is compiled in); each column is
 * one of the WLAN_CFG_INT_NUM_CONTEXTS_MAX interrupt contexts.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
130 
131 /**
132  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
133  * @soc: DP soc handle
134  * @ring_type: ring type
135  * @ring_num: ring_num
136  *
137  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
138  */
139 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
140 					    enum hal_ring_type ring_type,
141 					    int ring_num)
142 {
143 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
144 	uint8_t status = 0;
145 
146 	switch (ring_type) {
147 	case WBM2SW_RELEASE:
148 	case REO_DST:
149 	case RXDMA_BUF:
150 	case REO_EXCEPTION:
151 		status = ((nss_config) & (1 << ring_num));
152 		break;
153 	default:
154 		break;
155 	}
156 
157 	return status;
158 }
159 
160 #if !defined(DP_CON_MON)
161 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
162 {
163 	int i;
164 
165 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
166 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
167 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
168 	}
169 }
170 
171 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
172 
/**
 * dp_service_lmac_rings() - timer-driven servicing of per-LMAC rings
 * @arg: opaque pointer to the struct dp_soc
 *
 * For every LMAC with an attached pdev: run monitor processing, drain the
 * RXDMA error rings from each interrupt context, and replenish the RX
 * refill buffer ring unless that ring is owned by the NSS offload engine.
 * Re-arms soc->lmac_reap_timer at the end so this runs periodically.
 */
void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip LMAC ids with no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* Drain RXDMA error rings from every interrupt context */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Host replenishes only rings not offloaded to NSS */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	/* Poll again after DP_INTR_POLL_TIMER_MS */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
211 
212 #endif
213 
214 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
215 /**
216  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
217  *				rx_near_full_grp1 mask
218  * @soc: Datapath SoC Handle
219  * @ring_num: REO ring number
220  *
221  * Return: 1 if the ring_num belongs to reo_nf_grp1,
222  *	   0, otherwise.
223  */
224 static inline int
225 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
226 {
227 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
228 }
229 
230 /**
231  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
232  *				rx_near_full_grp2 mask
233  * @soc: Datapath SoC Handle
234  * @ring_num: REO ring number
235  *
236  * Return: 1 if the ring_num belongs to reo_nf_grp2,
237  *	   0, otherwise.
238  */
239 static inline int
240 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
241 {
242 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
243 }
244 
245 /**
246  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
247  *				ring type and number
248  * @soc: Datapath SoC handle
249  * @ring_type: SRNG type
250  * @ring_num: ring num
251  *
252  * Return: near-full irq mask pointer
253  */
254 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
255 					enum hal_ring_type ring_type,
256 					int ring_num)
257 {
258 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
259 	uint8_t wbm2_sw_rx_rel_ring_id;
260 	uint8_t *nf_irq_mask = NULL;
261 
262 	switch (ring_type) {
263 	case WBM2SW_RELEASE:
264 		wbm2_sw_rx_rel_ring_id =
265 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
266 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
267 			nf_irq_mask = &soc->wlan_cfg_ctx->
268 					int_tx_ring_near_full_irq_mask[0];
269 		}
270 		break;
271 	case REO_DST:
272 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
273 			nf_irq_mask =
274 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
275 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
276 			nf_irq_mask =
277 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
278 		else
279 			qdf_assert(0);
280 		break;
281 	default:
282 		break;
283 	}
284 
285 	return nf_irq_mask;
286 }
287 
288 /**
289  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
290  * @soc: Datapath SoC handle
291  * @ring_params: srng params handle
292  * @msi2_addr: MSI2 addr to be set for the SRNG
293  * @msi2_data: MSI2 data to be set for the SRNG
294  *
295  * Return: None
296  */
297 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
298 				  struct hal_srng_params *ring_params,
299 				  qdf_dma_addr_t msi2_addr,
300 				  uint32_t msi2_data)
301 {
302 	ring_params->msi2_addr = msi2_addr;
303 	ring_params->msi2_data = msi2_data;
304 }
305 
/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SENG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number; negative means the ring is
 *	not part of any near-full ext group
 *
 * Return: None
 */
void dp_srng_msi2_setup(struct dp_soc *soc,
			struct hal_srng_params *ring_params,
			int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	/* Nothing to program if the platform gives us no MSI assignment */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	/* Ring not mapped to a near-full group: clear the MSI2 fields */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Group sharing is flagged loudly but setup continues below */
	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* Fold the 64-bit MSI address and group-relative data into params */
	ring_params->nf_irq_support = 1;
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
353 
354 /* Percentage of ring entries considered as nearly full */
355 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
356 /* Percentage of ring entries considered as critically full */
357 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
358 /* Percentage of ring entries considered as safe threshold */
359 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
360 
361 /**
362  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
363  *			near full irq
364  * @soc: Datapath SoC handle
365  * @ring_params: ring params for SRNG
366  * @ring_type: ring type
367  */
368 void
369 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
370 					  struct hal_srng_params *ring_params,
371 					  int ring_type)
372 {
373 	if (ring_params->nf_irq_support) {
374 		ring_params->high_thresh = (ring_params->num_entries *
375 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
376 		ring_params->crit_thresh = (ring_params->num_entries *
377 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
378 		ring_params->safe_thresh = (ring_params->num_entries *
379 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
380 	}
381 }
382 
383 /**
384  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
385  *			structure from the ring params
386  * @soc: Datapath SoC handle
387  * @srng: SRNG handle
388  * @ring_params: ring params for a SRNG
389  *
390  * Return: None
391  */
392 static inline void
393 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
394 			  struct hal_srng_params *ring_params)
395 {
396 	srng->crit_thresh = ring_params->crit_thresh;
397 	srng->safe_thresh = ring_params->safe_thresh;
398 }
399 
400 #else
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	/* Near-full IRQ feature disabled: nothing to record */
}
406 #endif
407 
/**
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	/* Integrated (single-pdev) builds: no MSIs */
	return 0;
}
420 #else
421 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
422 {
423 	int msi_data_count;
424 	int msi_data_start;
425 	int msi_irq_start;
426 	int ret;
427 
428 	if (interrupt_mode == DP_INTR_INTEGRATED) {
429 		return 0;
430 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
431 		   DP_INTR_POLL) {
432 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
433 						  &msi_data_count,
434 						  &msi_data_start,
435 						  &msi_irq_start);
436 		if (ret) {
437 			qdf_err("Unable to get DP MSI assignment %d",
438 				interrupt_mode);
439 			return -EINVAL;
440 		}
441 		return msi_data_count;
442 	}
443 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
444 	return -EINVAL;
445 }
446 #endif
447 
448 /**
449  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
450  * update threshold value from wlan_cfg_ctx
451  * @soc: device handle
452  * @ring_params: per ring specific parameters
453  * @ring_type: Ring type
454  * @ring_num: Ring number for a given ring type
455  * @num_entries: number of entries to fill
456  *
457  * Fill the ring params with the pointer update threshold
458  * configuration parameters available in wlan_cfg_ctx
459  *
460  * Return: None
461  */
462 static void
463 dp_srng_configure_pointer_update_thresholds(
464 				struct dp_soc *soc,
465 				struct hal_srng_params *ring_params,
466 				int ring_type, int ring_num,
467 				int num_entries)
468 {
469 	if (ring_type == REO_DST) {
470 		ring_params->pointer_timer_threshold =
471 			wlan_cfg_get_pointer_timer_threshold_rx(
472 						soc->wlan_cfg_ctx);
473 		ring_params->pointer_num_threshold =
474 			wlan_cfg_get_pointer_num_threshold_rx(
475 						soc->wlan_cfg_ctx);
476 	}
477 }
478 
/**
 * dp_srng_init_idx() - initialize an already-allocated SRNG in HAL at a
 *	given ring index
 * @soc: DP soc handle
 * @srng: srng to initialize (memory must already be allocated)
 * @ring_type: HAL ring type
 * @ring_num: ring number within the type
 * @mac_id: mac id for LMAC rings
 * @idx: ring index passed through to hal_srng_setup_idx()
 *
 * Return: QDF_STATUS_SUCCESS (also when the ring was already initialized),
 *	QDF_STATUS_E_FAILURE when HAL setup fails (the srng memory is then
 *	freed).
 */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Double init is tolerated and reported as success */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* Program MSI vectors only in MSI mode and when not explicitly
	 * skipped for this ring type
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
						    ring_type, ring_num,
						    srng->num_entries);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);

	/* On HAL failure release the backing memory as well */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
546 
547 qdf_export_symbol(dp_srng_init_idx);
548 
549 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
550 /**
551  * dp_service_near_full_srngs() - Bottom half handler to process the near
552  *				full IRQ on a SRNG
553  * @dp_ctx: Datapath SoC handle
554  * @dp_budget: Number of SRNGs which can be processed in a single attempt
555  *		without rescheduling
556  * @cpu: cpu id
557  *
558  * Return: remaining budget/quota for the soc device
559  */
560 static
561 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
562 {
563 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
564 	struct dp_soc *soc = int_ctx->soc;
565 
566 	/*
567 	 * dp_service_near_full_srngs arch ops should be initialized always
568 	 * if the NEAR FULL IRQ feature is enabled.
569 	 */
570 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
571 							dp_budget);
572 }
573 #endif
574 
575 #ifndef QCA_HOST_MODE_WIFI_DISABLED
576 
/**
 * dp_service_srngs() - Top-level bottom half that services every ring type
 *	mapped to one interrupt context
 * @dp_ctx: pointer to the struct dp_intr for this context
 * @dp_budget: total work budget for this invocation
 * @cpu: cpu id this handler runs on
 *
 * Rings are serviced in priority order - TX completions first (to return
 * buffers), then REO exception, WBM release, REO destination (RX), REO
 * status and finally LMAC rings - each consuming from a shared budget.
 * Processing stops as soon as the budget is exhausted.
 *
 * Return: amount of budget actually consumed (dp_budget - remainder)
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	/* Mark this cpu as actively servicing rings for this soc */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the monitor vdev timer
	 * is not already doing it
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	dp_umac_reset_trigger_pre_reset_notify_cb(soc);

	return dp_budget - budget;
}
714 
715 #else /* QCA_HOST_MODE_WIFI_DISABLED */
716 
717 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
718 {
719 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
720 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
721 	struct dp_soc *soc = int_ctx->soc;
722 	uint32_t remaining_quota = dp_budget;
723 	uint32_t work_done  = 0;
724 	int budget = dp_budget;
725 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
726 
727 	if (reo_status_mask) {
728 		if (dp_reo_status_ring_handler(int_ctx, soc))
729 			int_ctx->intr_stats.num_reo_status_ring_masks++;
730 	}
731 
732 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
733 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
734 		if (work_done) {
735 			budget -=  work_done;
736 			if (budget <= 0)
737 				goto budget_done;
738 			remaining_quota = budget;
739 		}
740 	}
741 
742 	qdf_lro_flush(int_ctx->lro_ctx);
743 	intr_stats->num_masks++;
744 
745 budget_done:
746 	return dp_budget - budget;
747 }
748 
749 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
750 
751 QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
752 {
753 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
754 	int i;
755 	int lmac_id = 0;
756 
757 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
758 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
759 	soc->intr_mode = DP_INTR_POLL;
760 
761 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
762 		soc->intr_ctx[i].dp_intr_id = i;
763 		soc->intr_ctx[i].tx_ring_mask =
764 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
765 		soc->intr_ctx[i].rx_ring_mask =
766 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
767 		soc->intr_ctx[i].rx_mon_ring_mask =
768 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
769 		soc->intr_ctx[i].rx_err_ring_mask =
770 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
771 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
772 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
773 		soc->intr_ctx[i].reo_status_ring_mask =
774 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
775 		soc->intr_ctx[i].rxdma2host_ring_mask =
776 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
777 		soc->intr_ctx[i].soc = soc;
778 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
779 
780 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
781 			hif_event_history_init(soc->hif_handle, i);
782 			soc->mon_intr_id_lmac_map[lmac_id] = i;
783 			lmac_id++;
784 		}
785 	}
786 
787 	qdf_timer_init(soc->osdev, &soc->int_timer,
788 		       dp_interrupt_timer, (void *)soc,
789 		       QDF_TIMER_TYPE_WAKE_APPS);
790 
791 	return QDF_STATUS_SUCCESS;
792 }
793 
794 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
 * @soc: DP soc handle
 * @num_irq: IRQ number
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Registers dp_service_near_full_srngs as a HIF NAPI ext group for the
 * given interrupt context.
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	/* Near-full IRQ feature disabled: nothing to register */
	return 0;
}
822 #endif
823 
824 #ifdef DP_CON_MON_MSI_SKIP_SET
825 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
826 {
827 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
828 		 QDF_GLOBAL_MONITOR_MODE &&
829 		 !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx));
830 }
831 #else
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	/* Skip-set support not compiled in: always program the mask */
	return false;
}
836 #endif
837 
838 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
839 {
840 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
841 	int i;
842 
843 	if (soc->intr_mode == DP_INTR_POLL) {
844 		qdf_timer_free(&soc->int_timer);
845 	} else {
846 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
847 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
848 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
849 	}
850 
851 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
852 		soc->intr_ctx[i].tx_ring_mask = 0;
853 		soc->intr_ctx[i].rx_ring_mask = 0;
854 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
855 		soc->intr_ctx[i].rx_err_ring_mask = 0;
856 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
857 		soc->intr_ctx[i].reo_status_ring_mask = 0;
858 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
859 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
860 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
861 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
862 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
863 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
864 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
865 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
866 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
867 
868 		hif_event_history_deinit(soc->hif_handle, i);
869 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
870 	}
871 
872 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
873 		    sizeof(soc->mon_intr_id_lmac_map),
874 		    DP_MON_INVALID_LMAC_ID);
875 }
876 
877 QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
878 {
879 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
880 
881 	int i = 0;
882 	int num_irq = 0;
883 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
884 	int lmac_id = 0;
885 	int napi_scale;
886 
887 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
888 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
889 
890 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
891 		int ret = 0;
892 
893 		/* Map of IRQ ids registered with one interrupt context */
894 		int irq_id_map[HIF_MAX_GRP_IRQ];
895 
896 		int tx_mask =
897 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
898 		int rx_mask =
899 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
900 		int rx_mon_mask =
901 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
902 		int tx_mon_ring_mask =
903 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
904 		int rx_err_ring_mask =
905 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
906 		int rx_wbm_rel_ring_mask =
907 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
908 		int reo_status_ring_mask =
909 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
910 		int rxdma2host_ring_mask =
911 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
912 		int host2rxdma_ring_mask =
913 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
914 		int host2rxdma_mon_ring_mask =
915 			wlan_cfg_get_host2rxdma_mon_ring_mask(
916 				soc->wlan_cfg_ctx, i);
917 		int rx_near_full_grp_1_mask =
918 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
919 							     i);
920 		int rx_near_full_grp_2_mask =
921 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
922 							     i);
923 		int tx_ring_near_full_mask =
924 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
925 							    i);
926 		int host2txmon_ring_mask =
927 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
928 		int umac_reset_intr_mask =
929 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
930 
931 		if (dp_skip_rx_mon_ring_mask_set(soc))
932 			rx_mon_mask = 0;
933 
934 		soc->intr_ctx[i].dp_intr_id = i;
935 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
936 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
937 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
938 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
939 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
940 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
941 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
942 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
943 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
944 			 host2rxdma_mon_ring_mask;
945 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
946 						rx_near_full_grp_1_mask;
947 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
948 						rx_near_full_grp_2_mask;
949 		soc->intr_ctx[i].tx_ring_near_full_mask =
950 						tx_ring_near_full_mask;
951 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
952 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
953 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
954 
955 		soc->intr_ctx[i].soc = soc;
956 
957 		num_irq = 0;
958 
959 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
960 					       &num_irq);
961 
962 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
963 		    tx_ring_near_full_mask) {
964 			dp_soc_near_full_interrupt_attach(soc, num_irq,
965 							  irq_id_map, i);
966 		} else {
967 			napi_scale = wlan_cfg_get_napi_scale_factor(
968 							    soc->wlan_cfg_ctx);
969 			if (!napi_scale)
970 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
971 
972 			ret = hif_register_ext_group(soc->hif_handle,
973 				num_irq, irq_id_map, dp_service_srngs_wrapper,
974 				&soc->intr_ctx[i], "dp_intr",
975 				HIF_EXEC_NAPI_TYPE, napi_scale);
976 		}
977 
978 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
979 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
980 
981 		if (ret) {
982 			dp_init_err("%pK: failed, ret = %d", soc, ret);
983 			dp_soc_interrupt_detach(txrx_soc);
984 			return QDF_STATUS_E_FAILURE;
985 		}
986 
987 		hif_event_history_init(soc->hif_handle, i);
988 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
989 
990 		if (rx_err_ring_mask)
991 			rx_err_ring_intr_ctxt_id = i;
992 
993 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
994 			soc->mon_intr_id_lmac_map[lmac_id] = i;
995 			lmac_id++;
996 		}
997 	}
998 
999 	hif_configure_ext_group_interrupts(soc->hif_handle);
1000 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
1001 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
1002 						  rx_err_ring_intr_ctxt_id, 0);
1003 
1004 	return QDF_STATUS_SUCCESS;
1005 }
1006 
1007 #define AVG_MAX_MPDUS_PER_TID 128
1008 #define AVG_TIDS_PER_CLIENT 2
1009 #define AVG_FLOWS_PER_TID 2
1010 #define AVG_MSDUS_PER_FLOW 128
1011 #define AVG_MSDUS_PER_MPDU 4
1012 
1013 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
1014 {
1015 	struct qdf_mem_multi_page_t *pages;
1016 
1017 	if (mac_id != WLAN_INVALID_PDEV_ID) {
1018 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1019 	} else {
1020 		pages = &soc->link_desc_pages;
1021 	}
1022 
1023 	if (!pages) {
1024 		dp_err("can not get link desc pages");
1025 		QDF_ASSERT(0);
1026 		return;
1027 	}
1028 
1029 	if (pages->dma_pages) {
1030 		wlan_minidump_remove((void *)
1031 				     pages->dma_pages->page_v_addr_start,
1032 				     pages->num_pages * pages->page_size,
1033 				     soc->ctrl_psoc,
1034 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1035 				     "hw_link_desc_bank");
1036 		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
1037 					     pages, 0, false);
1038 	}
1039 }
1040 
1041 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
1042 
/**
 * dp_hw_link_desc_pool_banks_alloc() - allocate HW link descriptor banks
 * @soc: DP SOC handle
 * @mac_id: mac id of the monitor-mode pool, or WLAN_INVALID_PDEV_ID for
 *	    the common SOC-level pool
 *
 * Sizes the pool either from the monitor descriptor ring (per-mac case)
 * or from per-client average heuristics (common case), rounds the count
 * up to a power of two and allocates multi-page DMA memory for it.
 * Idempotent: returns success immediately if pages already exist.
 *
 * Return: QDF_STATUS_SUCCESS on success or when banks already exist,
 *	   QDF_STATUS_E_FAULT on failure.
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	/* HW-specific geometry of link/queue descriptors */
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		/* one link descriptor per monitor descriptor ring entry */
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		/* NOTE(review): the divisor 6 looks like a hard-coded
		 * "MSDUs per Rx link desc" rather than the
		 * num_msdus_per_link_desc used for the Tx estimate above —
		 * confirm intent before changing.
		 */
		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);
	total_mem_size =  *total_link_descs * link_desc_size;
	/* extra slack so the pool can be aligned to HW requirements */
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	/* NOTE(review): minidump_str is prepared above but the literal
	 * "hw_link_desc_bank" is passed here — verify whether the
	 * per-pool name was intended.
	 */
	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}
1143 
1144 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
1145 {
1146 	uint32_t i;
1147 	uint32_t size = soc->wbm_idle_scatter_buf_size;
1148 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
1149 	qdf_dma_addr_t paddr;
1150 
1151 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
1152 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1153 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
1154 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
1155 			if (vaddr) {
1156 				qdf_mem_free_consistent(soc->osdev,
1157 							soc->osdev->dev,
1158 							size,
1159 							vaddr,
1160 							paddr,
1161 							0);
1162 				vaddr = NULL;
1163 			}
1164 		}
1165 	} else {
1166 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
1167 				     soc->wbm_idle_link_ring.alloc_size,
1168 				     soc->ctrl_psoc,
1169 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1170 				     "wbm_idle_link_ring");
1171 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
1172 	}
1173 }
1174 
1175 QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
1176 {
1177 	uint32_t entry_size, i;
1178 	uint32_t total_mem_size;
1179 	qdf_dma_addr_t *baseaddr = NULL;
1180 	struct dp_srng *dp_srng;
1181 	uint32_t ring_type;
1182 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1183 	uint32_t tlds;
1184 
1185 	ring_type = WBM_IDLE_LINK;
1186 	dp_srng = &soc->wbm_idle_link_ring;
1187 	tlds = soc->total_link_descs;
1188 
1189 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
1190 	total_mem_size = entry_size * tlds;
1191 
1192 	if (total_mem_size <= max_alloc_size) {
1193 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
1194 			dp_init_err("%pK: Link desc idle ring setup failed",
1195 				    soc);
1196 			goto fail;
1197 		}
1198 
1199 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
1200 				  soc->wbm_idle_link_ring.alloc_size,
1201 				  soc->ctrl_psoc,
1202 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1203 				  "wbm_idle_link_ring");
1204 	} else {
1205 		uint32_t num_scatter_bufs;
1206 		uint32_t buf_size = 0;
1207 
1208 		soc->wbm_idle_scatter_buf_size =
1209 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1210 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1211 					soc->hal_soc, total_mem_size,
1212 					soc->wbm_idle_scatter_buf_size);
1213 
1214 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1215 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1216 				  FL("scatter bufs size out of bounds"));
1217 			goto fail;
1218 		}
1219 
1220 		for (i = 0; i < num_scatter_bufs; i++) {
1221 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
1222 			buf_size = soc->wbm_idle_scatter_buf_size;
1223 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1224 				qdf_mem_alloc_consistent(soc->osdev,
1225 							 soc->osdev->dev,
1226 							 buf_size,
1227 							 baseaddr);
1228 
1229 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1230 				QDF_TRACE(QDF_MODULE_ID_DP,
1231 					  QDF_TRACE_LEVEL_ERROR,
1232 					  FL("Scatter lst memory alloc fail"));
1233 				goto fail;
1234 			}
1235 		}
1236 		soc->num_scatter_bufs = num_scatter_bufs;
1237 	}
1238 	return QDF_STATUS_SUCCESS;
1239 
1240 fail:
1241 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1242 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
1243 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
1244 
1245 		if (vaddr) {
1246 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1247 						soc->wbm_idle_scatter_buf_size,
1248 						vaddr,
1249 						paddr, 0);
1250 			vaddr = NULL;
1251 		}
1252 	}
1253 	return QDF_STATUS_E_NOMEM;
1254 }
1255 
1256 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
1257 
1258 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
1259 {
1260 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
1261 
1262 	if (dp_srng->base_vaddr_unaligned) {
1263 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
1264 			return QDF_STATUS_E_FAILURE;
1265 	}
1266 	return QDF_STATUS_SUCCESS;
1267 }
1268 
/* Deinitialize the WBM idle link SRNG (counterpart of
 * dp_hw_link_desc_ring_init())
 */
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
1273 
1274 #ifdef IPA_OFFLOAD
1275 #define USE_1_IPA_RX_REO_RING 1
1276 #define USE_2_IPA_RX_REO_RINGS 2
1277 #define REO_DST_RING_SIZE_QCA6290 1023
1278 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
1279 #define REO_DST_RING_SIZE_QCA8074 1023
1280 #define REO_DST_RING_SIZE_QCN9000 2048
1281 #else
1282 #define REO_DST_RING_SIZE_QCA8074 8
1283 #define REO_DST_RING_SIZE_QCN9000 8
1284 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
1285 
1286 #ifdef IPA_WDI3_TX_TWO_PIPES
1287 #ifdef DP_MEMORY_OPT
/* Initialize the TCL/WBM ring pair used as the IPA alternate Tx ring */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Deinitialize the IPA alternate Tx ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Allocate memory for the IPA alternate Tx ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Free memory of the IPA alternate Tx ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
1307 
1308 #else /* !DP_MEMORY_OPT */
/* DP_MEMORY_OPT disabled: the IPA alternate Tx ring is not managed
 * through these hooks, so they are no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
1326 #endif /* DP_MEMORY_OPT */
1327 
/* Program HW (TCL) with the IPA alternate Tx data ring */
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
1333 
1334 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* IPA_WDI3_TX_TWO_PIPES disabled: no alternate Tx pipe exists, so all
 * alternate-ring hooks collapse to no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
1356 
1357 #endif /* IPA_WDI3_TX_TWO_PIPES */
1358 
1359 #else
1360 
1361 #define REO_DST_RING_SIZE_QCA6290 1024
1362 
/* IPA_OFFLOAD disabled: IPA alternate Tx ring hooks are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
1384 
1385 #endif /* IPA_OFFLOAD */
1386 
1387 /**
1388  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1389  * @soc: Datapath soc handler
1390  *
1391  * This api resets the default cpu ring map
1392  */
void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
{
	uint8_t i;
	/* which radios (if any) are offloaded to NSS */
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		switch (nss_config) {
		case dp_nss_cfg_first_radio:
			/*
			 * Setting Tx ring map for one nss offloaded radio
			 */
			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_second_radio:
			/*
			 * Setting Tx ring map for the second nss offloaded
			 * radio
			 */
			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbdc:
			/*
			 * Setting Tx ring map for 2 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
			break;

		case dp_nss_cfg_dbtc:
			/*
			 * Setting Tx ring map for 3 nss offloaded radios
			 */
			soc->tx_ring_map[i] =
				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
			break;

		default:
			dp_err("tx_ring_map failed due to invalid nss cfg");
			break;
		}
	}
}
1436 
1437 /**
1438  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
1439  *					  unused WMAC hw rings
1440  * @soc: DP Soc handle
1441  * @mac_num: wmac num
1442  *
1443  * Return: Return void
1444  */
static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
						int mac_num)
{
	uint8_t *grp_mask = NULL;
	int group_number;

	/* Rx refill (host2rxdma) ring of this mac */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	if (group_number < 0)
		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_BUF, mac_num %d",
			      soc, mac_num);
	else
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
						  group_number, 0x0);

	/* Rx monitor destination ring of this mac */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	if (group_number < 0)
		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_DST, mac_num %d",
			      soc, mac_num);
	else
		wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0x0);

	/* rxdma2host destination ring of this mac */
	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	if (group_number < 0)
		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_DST, mac_num %d",
			      soc, mac_num);
	else
		wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
						  group_number, 0x0);

	/* monitor buffer (host2rxdma mon) ring of this mac */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	if (group_number < 0)
		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_BUF, mac_num %d",
			      soc, mac_num);
	else
		wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
						      group_number, 0x0);
}
1487 
1488 #ifdef IPA_OFFLOAD
1489 #ifdef IPA_WDI3_VLAN_SUPPORT
1490 /**
1491  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
1492  *                                     ring for vlan tagged traffic
1493  * @soc: DP Soc handle
1494  *
1495  * Return: Return void
1496  */
1497 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
1498 {
1499 	uint8_t *grp_mask = NULL;
1500 	int group_number, mask;
1501 
1502 	if (!wlan_ipa_is_vlan_enabled())
1503 		return;
1504 
1505 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1506 
1507 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
1508 	if (group_number < 0) {
1509 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1510 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
1511 		return;
1512 	}
1513 
1514 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1515 
1516 	/* reset the interrupt mask for offloaded ring */
1517 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
1518 
1519 	/*
1520 	 * set the interrupt mask to zero for rx offloaded radio.
1521 	 */
1522 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1523 }
1524 #else
1525 inline
1526 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
1527 { }
1528 #endif /* IPA_WDI3_VLAN_SUPPORT */
1529 #else
1530 inline
1531 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
1532 { }
1533 #endif /* IPA_OFFLOAD */
1534 
1535 /**
1536  * dp_soc_reset_intr_mask() - reset interrupt mask
1537  * @soc: DP Soc handle
1538  *
1539  * Return: Return void
1540  */
1541 void dp_soc_reset_intr_mask(struct dp_soc *soc)
1542 {
1543 	uint8_t j;
1544 	uint8_t *grp_mask = NULL;
1545 	int group_number, mask, num_ring;
1546 
1547 	/* number of tx ring */
1548 	num_ring = soc->num_tcl_data_rings;
1549 
1550 	/*
1551 	 * group mask for tx completion  ring.
1552 	 */
1553 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1554 
1555 	/* loop and reset the mask for only offloaded ring */
1556 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
1557 		/*
1558 		 * Group number corresponding to tx offloaded ring.
1559 		 */
1560 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1561 		if (group_number < 0) {
1562 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1563 				      soc, WBM2SW_RELEASE, j);
1564 			continue;
1565 		}
1566 
1567 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1568 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
1569 		    (!mask)) {
1570 			continue;
1571 		}
1572 
1573 		/* reset the tx mask for offloaded ring */
1574 		mask &= (~(1 << j));
1575 
1576 		/*
1577 		 * reset the interrupt mask for offloaded ring.
1578 		 */
1579 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1580 	}
1581 
1582 	/* number of rx rings */
1583 	num_ring = soc->num_reo_dest_rings;
1584 
1585 	/*
1586 	 * group mask for reo destination ring.
1587 	 */
1588 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1589 
1590 	/* loop and reset the mask for only offloaded ring */
1591 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
1592 		/*
1593 		 * Group number corresponding to rx offloaded ring.
1594 		 */
1595 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1596 		if (group_number < 0) {
1597 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1598 				      soc, REO_DST, j);
1599 			continue;
1600 		}
1601 
1602 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1603 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
1604 		    (!mask)) {
1605 			continue;
1606 		}
1607 
1608 		/* reset the interrupt mask for offloaded ring */
1609 		mask &= (~(1 << j));
1610 
1611 		/*
1612 		 * set the interrupt mask to zero for rx offloaded radio.
1613 		 */
1614 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1615 	}
1616 
1617 	/*
1618 	 * group mask for Rx buffer refill ring
1619 	 */
1620 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1621 
1622 	/* loop and reset the mask for only offloaded ring */
1623 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1624 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1625 
1626 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1627 			continue;
1628 		}
1629 
1630 		/*
1631 		 * Group number corresponding to rx offloaded ring.
1632 		 */
1633 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
1634 		if (group_number < 0) {
1635 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1636 				      soc, REO_DST, lmac_id);
1637 			continue;
1638 		}
1639 
1640 		/* set the interrupt mask for offloaded ring */
1641 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1642 							  group_number);
1643 		mask &= (~(1 << lmac_id));
1644 
1645 		/*
1646 		 * set the interrupt mask to zero for rx offloaded radio.
1647 		 */
1648 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1649 						  group_number, mask);
1650 	}
1651 
1652 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1653 
1654 	for (j = 0; j < num_ring; j++) {
1655 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
1656 			continue;
1657 		}
1658 
1659 		/*
1660 		 * Group number corresponding to rx err ring.
1661 		 */
1662 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1663 		if (group_number < 0) {
1664 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1665 				      soc, REO_EXCEPTION, j);
1666 			continue;
1667 		}
1668 
1669 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
1670 					      group_number, 0);
1671 	}
1672 }
1673 
1674 #ifdef IPA_OFFLOAD
1675 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
1676 			 uint32_t *remap1, uint32_t *remap2)
1677 {
1678 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
1679 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
1680 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
1681 
1682 	switch (soc->arch_id) {
1683 	case CDP_ARCH_TYPE_BE:
1684 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
1685 					      soc->num_reo_dest_rings -
1686 					      USE_2_IPA_RX_REO_RINGS, remap1,
1687 					      remap2);
1688 		break;
1689 
1690 	case CDP_ARCH_TYPE_LI:
1691 		if (wlan_ipa_is_vlan_enabled()) {
1692 			hal_compute_reo_remap_ix2_ix3(
1693 					soc->hal_soc, ring,
1694 					soc->num_reo_dest_rings -
1695 					USE_2_IPA_RX_REO_RINGS, remap1,
1696 					remap2);
1697 
1698 		} else {
1699 			hal_compute_reo_remap_ix2_ix3(
1700 					soc->hal_soc, ring,
1701 					soc->num_reo_dest_rings -
1702 					USE_1_IPA_RX_REO_RING, remap1,
1703 					remap2);
1704 		}
1705 
1706 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
1707 		break;
1708 	default:
1709 		dp_err("unknown arch_id 0x%x", soc->arch_id);
1710 		QDF_BUG(0);
1711 	}
1712 
1713 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
1714 
1715 	return true;
1716 }
1717 
1718 #ifdef IPA_WDI3_TX_TWO_PIPES
/* True if @index is the TCL ring reserved for the IPA alternate Tx pipe */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True if @index is the WBM completion ring of the IPA alternate Tx pipe */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
1728 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Two-pipe IPA Tx not compiled in: no ring is an alternate Tx ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
1738 #endif /* IPA_WDI3_TX_TWO_PIPES */
1739 
1740 /**
1741  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
1742  *
1743  * @tx_ring_num: Tx ring number
1744  * @tx_ipa_ring_sz: Return param only updated for IPA.
1745  * @soc_cfg_ctx: dp soc cfg context
1746  *
1747  * Return: None
1748  */
1749 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
1750 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1751 {
1752 	if (!soc_cfg_ctx->ipa_enabled)
1753 		return;
1754 
1755 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
1756 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
1757 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
1758 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
1759 }
1760 
1761 /**
1762  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
1763  *
1764  * @tx_comp_ring_num: Tx comp ring number
1765  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
1766  * @soc_cfg_ctx: dp soc cfg context
1767  *
1768  * Return: None
1769  */
1770 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
1771 					 int *tx_comp_ipa_ring_sz,
1772 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1773 {
1774 	if (!soc_cfg_ctx->ipa_enabled)
1775 		return;
1776 
1777 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
1778 		*tx_comp_ipa_ring_sz =
1779 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
1780 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
1781 		*tx_comp_ipa_ring_sz =
1782 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
1783 }
1784 #else
1785 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
1786 {
1787 	uint8_t num = 0;
1788 
1789 	switch (value) {
1790 	/* should we have all the different possible ring configs */
1791 	case 0xFF:
1792 		num = 8;
1793 		ring[0] = REO_REMAP_SW1;
1794 		ring[1] = REO_REMAP_SW2;
1795 		ring[2] = REO_REMAP_SW3;
1796 		ring[3] = REO_REMAP_SW4;
1797 		ring[4] = REO_REMAP_SW5;
1798 		ring[5] = REO_REMAP_SW6;
1799 		ring[6] = REO_REMAP_SW7;
1800 		ring[7] = REO_REMAP_SW8;
1801 		break;
1802 
1803 	case 0x3F:
1804 		num = 6;
1805 		ring[0] = REO_REMAP_SW1;
1806 		ring[1] = REO_REMAP_SW2;
1807 		ring[2] = REO_REMAP_SW3;
1808 		ring[3] = REO_REMAP_SW4;
1809 		ring[4] = REO_REMAP_SW5;
1810 		ring[5] = REO_REMAP_SW6;
1811 		break;
1812 
1813 	case 0xF:
1814 		num = 4;
1815 		ring[0] = REO_REMAP_SW1;
1816 		ring[1] = REO_REMAP_SW2;
1817 		ring[2] = REO_REMAP_SW3;
1818 		ring[3] = REO_REMAP_SW4;
1819 		break;
1820 	case 0xE:
1821 		num = 3;
1822 		ring[0] = REO_REMAP_SW2;
1823 		ring[1] = REO_REMAP_SW3;
1824 		ring[2] = REO_REMAP_SW4;
1825 		break;
1826 	case 0xD:
1827 		num = 3;
1828 		ring[0] = REO_REMAP_SW1;
1829 		ring[1] = REO_REMAP_SW3;
1830 		ring[2] = REO_REMAP_SW4;
1831 		break;
1832 	case 0xC:
1833 		num = 2;
1834 		ring[0] = REO_REMAP_SW3;
1835 		ring[1] = REO_REMAP_SW4;
1836 		break;
1837 	case 0xB:
1838 		num = 3;
1839 		ring[0] = REO_REMAP_SW1;
1840 		ring[1] = REO_REMAP_SW2;
1841 		ring[2] = REO_REMAP_SW4;
1842 		break;
1843 	case 0xA:
1844 		num = 2;
1845 		ring[0] = REO_REMAP_SW2;
1846 		ring[1] = REO_REMAP_SW4;
1847 		break;
1848 	case 0x9:
1849 		num = 2;
1850 		ring[0] = REO_REMAP_SW1;
1851 		ring[1] = REO_REMAP_SW4;
1852 		break;
1853 	case 0x8:
1854 		num = 1;
1855 		ring[0] = REO_REMAP_SW4;
1856 		break;
1857 	case 0x7:
1858 		num = 3;
1859 		ring[0] = REO_REMAP_SW1;
1860 		ring[1] = REO_REMAP_SW2;
1861 		ring[2] = REO_REMAP_SW3;
1862 		break;
1863 	case 0x6:
1864 		num = 2;
1865 		ring[0] = REO_REMAP_SW2;
1866 		ring[1] = REO_REMAP_SW3;
1867 		break;
1868 	case 0x5:
1869 		num = 2;
1870 		ring[0] = REO_REMAP_SW1;
1871 		ring[1] = REO_REMAP_SW3;
1872 		break;
1873 	case 0x4:
1874 		num = 1;
1875 		ring[0] = REO_REMAP_SW3;
1876 		break;
1877 	case 0x3:
1878 		num = 2;
1879 		ring[0] = REO_REMAP_SW1;
1880 		ring[1] = REO_REMAP_SW2;
1881 		break;
1882 	case 0x2:
1883 		num = 1;
1884 		ring[0] = REO_REMAP_SW2;
1885 		break;
1886 	case 0x1:
1887 		num = 1;
1888 		ring[0] = REO_REMAP_SW1;
1889 		break;
1890 	default:
1891 		dp_err("unknown reo ring map 0x%x", value);
1892 		QDF_BUG(0);
1893 	}
1894 	return num;
1895 }
1896 
1897 bool dp_reo_remap_config(struct dp_soc *soc,
1898 			 uint32_t *remap0,
1899 			 uint32_t *remap1,
1900 			 uint32_t *remap2)
1901 {
1902 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1903 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
1904 	uint8_t num;
1905 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
1906 	uint32_t value;
1907 
1908 	switch (offload_radio) {
1909 	case dp_nss_cfg_default:
1910 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
1911 		num = dp_reo_ring_selection(value, ring);
1912 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
1913 					      num, remap1, remap2);
1914 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
1915 
1916 		break;
1917 	case dp_nss_cfg_first_radio:
1918 		value = reo_config & 0xE;
1919 		num = dp_reo_ring_selection(value, ring);
1920 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
1921 					      num, remap1, remap2);
1922 
1923 		break;
1924 	case dp_nss_cfg_second_radio:
1925 		value = reo_config & 0xD;
1926 		num = dp_reo_ring_selection(value, ring);
1927 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
1928 					      num, remap1, remap2);
1929 
1930 		break;
1931 	case dp_nss_cfg_dbdc:
1932 	case dp_nss_cfg_dbtc:
1933 		/* return false if both or all are offloaded to NSS */
1934 		return false;
1935 	}
1936 
1937 	dp_debug("remap1 %x remap2 %x offload_radio %u",
1938 		 *remap1, *remap2, offload_radio);
1939 	return true;
1940 }
1941 
/* IPA disabled: Tx ring sizes are left at their configured defaults */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
1952 #endif /* IPA_OFFLOAD */
1953 
1954 /**
1955  * dp_reo_frag_dst_set() - configure reo register to set the
1956  *                        fragment destination ring
1957  * @soc: Datapath soc
1958  * @frag_dst_ring: output parameter to set fragment destination ring
1959  *
1960  * Based on offload_radio below fragment destination rings is selected
1961  * 0 - TCL
1962  * 1 - SW1
1963  * 2 - SW2
1964  * 3 - SW3
1965  * 4 - SW4
1966  * 5 - Release
1967  * 6 - FW
1968  * 7 - alternate select
1969  *
1970  * Return: void
1971  */
void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case dp_nss_cfg_default:
		/* host handles fragments: route them back through TCL */
		*frag_dst_ring = REO_REMAP_TCL;
		break;
	case dp_nss_cfg_first_radio:
		/*
		 * This configuration is valid for single band radio which
		 * is also NSS offload.
		 */
		/* fallthrough */
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
		break;
	default:
		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
		break;
	}
}
1994 
/* rx_hw_stats_event is used to wait for completion of the HW Rx stats
 * query when WLAN_FEATURE_STATS_EXT is enabled; stubbed out otherwise.
 */
#ifdef WLAN_FEATURE_STATS_EXT
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
2005 
/**
 * dp_deinit_tx_pair_by_index() - deinit a TCL data / WBM completion ring pair
 * @soc: DP soc handle
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Looks up the HW ring numbers for @index, unregisters the SSR-dump and
 * minidump entries and deinitializes both SRNGs.  The completion ring is
 * skipped when the index has no WBM ring (INVALID_WBM_RING_NUM).
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	dp_ssr_dump_srng_unregister("tcl_data_ring", index);
	dp_ssr_dump_srng_unregister("tx_comp_ring", index);

	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* some indices (e.g. IPA-owned) have no host completion ring */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
2043 
2044 /**
2045  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
2046  * ring pair
2047  * @soc: DP soc pointer
2048  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2049  *
2050  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2051  */
2052 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
2053 						uint8_t index)
2054 {
2055 	int tcl_ring_num, wbm_ring_num;
2056 	uint8_t bm_id;
2057 
2058 	if (index >= MAX_TCL_DATA_RINGS) {
2059 		dp_err("unexpected index!");
2060 		QDF_BUG(0);
2061 		goto fail1;
2062 	}
2063 
2064 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
2065 						index,
2066 						&tcl_ring_num,
2067 						&wbm_ring_num);
2068 
2069 	if (tcl_ring_num == -1) {
2070 		dp_err("incorrect tcl ring num for index %u", index);
2071 		goto fail1;
2072 	}
2073 
2074 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
2075 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
2076 			 tcl_ring_num, 0)) {
2077 		dp_err("dp_srng_init failed for tcl_data_ring");
2078 		goto fail1;
2079 	}
2080 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
2081 			  soc->tcl_data_ring[index].alloc_size,
2082 			  soc->ctrl_psoc,
2083 			  WLAN_MD_DP_SRNG_TCL_DATA,
2084 			  "tcl_data_ring");
2085 
2086 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
2087 		goto set_rbm;
2088 
2089 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2090 			 wbm_ring_num, 0)) {
2091 		dp_err("dp_srng_init failed for tx_comp_ring");
2092 		goto fail1;
2093 	}
2094 
2095 	dp_ssr_dump_srng_register("tcl_data_ring",
2096 				  &soc->tcl_data_ring[index], index);
2097 	dp_ssr_dump_srng_register("tx_comp_ring",
2098 				  &soc->tx_comp_ring[index], index);
2099 
2100 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
2101 			  soc->tx_comp_ring[index].alloc_size,
2102 			  soc->ctrl_psoc,
2103 			  WLAN_MD_DP_SRNG_TX_COMP,
2104 			  "tcl_comp_ring");
2105 set_rbm:
2106 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
2107 
2108 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
2109 
2110 	return QDF_STATUS_SUCCESS;
2111 
2112 fail1:
2113 	return QDF_STATUS_E_FAILURE;
2114 }
2115 
2116 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
2117 {
2118 	dp_debug("index %u", index);
2119 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
2120 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
2121 }
2122 
2123 /**
2124  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
2125  * ring pair for the given "index"
2126  * @soc: DP soc pointer
2127  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2128  *
2129  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2130  */
2131 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
2132 						 uint8_t index)
2133 {
2134 	int tx_ring_size;
2135 	int tx_comp_ring_size;
2136 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
2137 	int cached = 0;
2138 
2139 	if (index >= MAX_TCL_DATA_RINGS) {
2140 		dp_err("unexpected index!");
2141 		QDF_BUG(0);
2142 		goto fail1;
2143 	}
2144 
2145 	dp_debug("index %u", index);
2146 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
2147 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
2148 
2149 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
2150 			  tx_ring_size, cached)) {
2151 		dp_err("dp_srng_alloc failed for tcl_data_ring");
2152 		goto fail1;
2153 	}
2154 
2155 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2156 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
2157 	/* Enable cached TCL desc if NSS offload is disabled */
2158 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2159 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
2160 
2161 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
2162 	    INVALID_WBM_RING_NUM)
2163 		return QDF_STATUS_SUCCESS;
2164 
2165 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2166 			  tx_comp_ring_size, cached)) {
2167 		dp_err("dp_srng_alloc failed for tx_comp_ring");
2168 		goto fail1;
2169 	}
2170 
2171 	return QDF_STATUS_SUCCESS;
2172 
2173 fail1:
2174 	return QDF_STATUS_E_FAILURE;
2175 }
2176 
2177 /**
2178  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
2179  * @pdev: DP_PDEV handle
2180  *
2181  * Return: void
2182  */
2183 void
2184 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2185 {
2186 	uint8_t map_id;
2187 	struct dp_soc *soc = pdev->soc;
2188 
2189 	if (!soc)
2190 		return;
2191 
2192 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2193 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2194 			     default_dscp_tid_map,
2195 			     sizeof(default_dscp_tid_map));
2196 	}
2197 
2198 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2199 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2200 					default_dscp_tid_map,
2201 					map_id);
2202 	}
2203 }
2204 
2205 /**
2206  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
2207  * @pdev: DP_PDEV handle
2208  *
2209  * Return: void
2210  */
2211 void
2212 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
2213 {
2214 	struct dp_soc *soc = pdev->soc;
2215 
2216 	if (!soc)
2217 		return;
2218 
2219 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
2220 		     sizeof(default_pcp_tid_map));
2221 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
2222 }
2223 
/*
 * dp_reo_desc_freelist_destroy() - drain and destroy the REO desc freelist
 *
 * Unmaps and frees every queued HW queue descriptor, then destroys the
 * list and its lock. Has external linkage only when UMAC HW reset
 * support needs to call it from elsewhere.
 */
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* DMA-unmap the HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
2247 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	soc->reo_desc_deferred_freelist_init = true;
}

/**
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* init flag is cleared under the lock before draining the list */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		/* DMA-unmap each HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Deferred REO qdesc destroy disabled: create/destroy are no-ops */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
2300 
2301 /**
2302  * dp_soc_reset_txrx_ring_map() - reset tx ring map
2303  * @soc: DP SOC handle
2304  *
2305  */
2306 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
2307 {
2308 	uint32_t i;
2309 
2310 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
2311 		soc->tx_ring_map[i] = 0;
2312 }
2313 
2314 /**
2315  * dp_soc_deinit() - Deinitialize txrx SOC
2316  * @txrx_soc: Opaque DP SOC handle
2317  *
2318  * Return: None
2319  */
2320 void dp_soc_deinit(void *txrx_soc)
2321 {
2322 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2323 	struct htt_soc *htt_soc = soc->htt_handle;
2324 
2325 	dp_monitor_soc_deinit(soc);
2326 
2327 	/* free peer tables & AST tables allocated during peer_map_attach */
2328 	if (soc->peer_map_attach_success) {
2329 		dp_peer_find_detach(soc);
2330 		soc->arch_ops.txrx_peer_map_detach(soc);
2331 		soc->peer_map_attach_success = FALSE;
2332 	}
2333 
2334 	qdf_flush_work(&soc->htt_stats.work);
2335 	qdf_disable_work(&soc->htt_stats.work);
2336 
2337 	qdf_spinlock_destroy(&soc->htt_stats.lock);
2338 
2339 	dp_soc_reset_txrx_ring_map(soc);
2340 
2341 	dp_reo_desc_freelist_destroy(soc);
2342 	dp_reo_desc_deferred_freelist_destroy(soc);
2343 
2344 	DEINIT_RX_HW_STATS_LOCK(soc);
2345 
2346 	qdf_spinlock_destroy(&soc->ast_lock);
2347 
2348 	dp_peer_mec_spinlock_destroy(soc);
2349 
2350 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2351 
2352 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
2353 
2354 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
2355 
2356 	qdf_spinlock_destroy(&soc->vdev_map_lock);
2357 
2358 	dp_reo_cmdlist_destroy(soc);
2359 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
2360 
2361 	dp_soc_tx_desc_sw_pools_deinit(soc);
2362 
2363 	dp_soc_srng_deinit(soc);
2364 
2365 	dp_hw_link_desc_ring_deinit(soc);
2366 
2367 	dp_soc_print_inactive_objects(soc);
2368 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
2369 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
2370 
2371 	htt_soc_htc_dealloc(soc->htt_handle);
2372 
2373 	htt_soc_detach(htt_soc);
2374 
2375 	/* Free wbm sg list and reset flags in down path */
2376 	dp_rx_wbm_sg_list_deinit(soc);
2377 
2378 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
2379 			     WLAN_MD_DP_SOC, "dp_soc");
2380 }
2381 
#ifdef QCA_HOST2FW_RXBUF_RING
/* Register the RXDMA error destination ring of @lmac_id with FW via HTT */
void
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
				int lmac_id)
{
	/* Nothing to register if the SRNG was never set up */
	if (!soc->rxdma_err_dst_ring[lmac_id].hal_srng)
		return;

	htt_srng_setup(soc->htt_handle, mac_id,
		       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
		       RXDMA_DST);
}
#endif
2393 
2394 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
2395 				  enum cdp_host_reo_dest_ring *reo_dest,
2396 				  bool *hash_based)
2397 {
2398 	struct dp_soc *soc;
2399 	struct dp_pdev *pdev;
2400 
2401 	pdev = vdev->pdev;
2402 	soc = pdev->soc;
2403 	/*
2404 	 * hash based steering is disabled for Radios which are offloaded
2405 	 * to NSS
2406 	 */
2407 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
2408 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
2409 
2410 	/*
2411 	 * Below line of code will ensure the proper reo_dest ring is chosen
2412 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
2413 	 */
2414 	*reo_dest = pdev->reo_dest;
2415 }
2416 
2417 #ifdef IPA_OFFLOAD
2418 /**
2419  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
2420  * @vdev: Virtual device
2421  *
2422  * Return: true if the vdev is of subtype P2P
2423  *	   false if the vdev is of any other subtype
2424  */
2425 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
2426 {
2427 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
2428 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
2429 	    vdev->subtype == wlan_op_subtype_p2p_go)
2430 		return true;
2431 
2432 	return false;
2433 }
2434 
2435 /**
2436  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2437  * @vdev: Datapath VDEV handle
2438  * @setup_info:
2439  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2440  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2441  * @lmac_peer_id_msb:
2442  *
2443  * If IPA is enabled in ini, for SAP mode, disable hash based
2444  * steering, use default reo_dst ring for RX. Use config values for other modes.
2445  *
2446  * Return: None
2447  */
2448 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
2449 				       struct cdp_peer_setup_info *setup_info,
2450 				       enum cdp_host_reo_dest_ring *reo_dest,
2451 				       bool *hash_based,
2452 				       uint8_t *lmac_peer_id_msb)
2453 {
2454 	struct dp_soc *soc;
2455 	struct dp_pdev *pdev;
2456 
2457 	pdev = vdev->pdev;
2458 	soc = pdev->soc;
2459 
2460 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
2461 
2462 	/* For P2P-GO interfaces we do not need to change the REO
2463 	 * configuration even if IPA config is enabled
2464 	 */
2465 	if (dp_is_vdev_subtype_p2p(vdev))
2466 		return;
2467 
2468 	/*
2469 	 * If IPA is enabled, disable hash-based flow steering and set
2470 	 * reo_dest_ring_4 as the REO ring to receive packets on.
2471 	 * IPA is configured to reap reo_dest_ring_4.
2472 	 *
2473 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
2474 	 * value enum value is from 1 - 4.
2475 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
2476 	 */
2477 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
2478 		if (dp_ipa_is_mdm_platform()) {
2479 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
2480 			if (vdev->opmode == wlan_op_mode_ap)
2481 				*hash_based = 0;
2482 		} else {
2483 			dp_debug("opt_dp: default HOST reo ring is set");
2484 		}
2485 	}
2486 }
2487 
2488 #else
2489 
2490 /**
2491  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2492  * @vdev: Datapath VDEV handle
2493  * @setup_info:
2494  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2495  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2496  * @lmac_peer_id_msb:
2497  *
2498  * Use system config values for hash based steering.
2499  * Return: None
2500  */
2501 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
2502 				       struct cdp_peer_setup_info *setup_info,
2503 				       enum cdp_host_reo_dest_ring *reo_dest,
2504 				       bool *hash_based,
2505 				       uint8_t *lmac_peer_id_msb)
2506 {
2507 	struct dp_soc *soc = vdev->pdev->soc;
2508 
2509 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
2510 					lmac_peer_id_msb);
2511 }
2512 #endif /* IPA_OFFLOAD */
2513 
2514 /**
2515  * dp_peer_setup_wifi3() - initialize the peer
2516  * @soc_hdl: soc handle object
2517  * @vdev_id: vdev_id of vdev object
2518  * @peer_mac: Peer's mac address
2519  * @setup_info: peer setup info for MLO
2520  *
2521  * Return: QDF_STATUS
2522  */
2523 QDF_STATUS
2524 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2525 		    uint8_t *peer_mac,
2526 		    struct cdp_peer_setup_info *setup_info)
2527 {
2528 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2529 	struct dp_pdev *pdev;
2530 	bool hash_based = 0;
2531 	enum cdp_host_reo_dest_ring reo_dest;
2532 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2533 	struct dp_vdev *vdev = NULL;
2534 	struct dp_peer *peer =
2535 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
2536 					       DP_MOD_ID_CDP);
2537 	struct dp_peer *mld_peer = NULL;
2538 	enum wlan_op_mode vdev_opmode;
2539 	uint8_t lmac_peer_id_msb = 0;
2540 
2541 	if (!peer)
2542 		return QDF_STATUS_E_FAILURE;
2543 
2544 	vdev = peer->vdev;
2545 	if (!vdev) {
2546 		status = QDF_STATUS_E_FAILURE;
2547 		goto fail;
2548 	}
2549 
2550 	/* save vdev related member in case vdev freed */
2551 	vdev_opmode = vdev->opmode;
2552 	pdev = vdev->pdev;
2553 	dp_peer_setup_get_reo_hash(vdev, setup_info,
2554 				   &reo_dest, &hash_based,
2555 				   &lmac_peer_id_msb);
2556 
2557 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
2558 					   peer, vdev, vdev->vdev_id,
2559 					   setup_info);
2560 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
2561 		"hash-based-steering:%d default-reo_dest:%u",
2562 		pdev->pdev_id, vdev->vdev_id,
2563 		vdev->opmode, peer,
2564 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
2565 
2566 	/*
2567 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
2568 	 * i.e both the devices have same MAC address. In these
2569 	 * cases we want such pkts to be processed in NULL Q handler
2570 	 * which is REO2TCL ring. for this reason we should
2571 	 * not setup reo_queues and default route for bss_peer.
2572 	 */
2573 	if (!IS_MLO_DP_MLD_PEER(peer))
2574 		dp_monitor_peer_tx_init(pdev, peer);
2575 
2576 	if (!setup_info)
2577 		if (dp_peer_legacy_setup(soc, peer) !=
2578 				QDF_STATUS_SUCCESS) {
2579 			status = QDF_STATUS_E_RESOURCES;
2580 			goto fail;
2581 		}
2582 
2583 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
2584 		status = QDF_STATUS_E_FAILURE;
2585 		goto fail;
2586 	}
2587 
2588 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
2589 		/* TODO: Check the destination ring number to be passed to FW */
2590 		soc->cdp_soc.ol_ops->peer_set_default_routing(
2591 				soc->ctrl_psoc,
2592 				peer->vdev->pdev->pdev_id,
2593 				peer->mac_addr.raw,
2594 				peer->vdev->vdev_id, hash_based, reo_dest,
2595 				lmac_peer_id_msb);
2596 	}
2597 
2598 	qdf_atomic_set(&peer->is_default_route_set, 1);
2599 
2600 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
2601 	if (QDF_IS_STATUS_ERROR(status)) {
2602 		dp_peer_err("peer mlo setup failed");
2603 		qdf_assert_always(0);
2604 	}
2605 
2606 	if (vdev_opmode != wlan_op_mode_monitor) {
2607 		/* In case of MLD peer, switch peer to mld peer and
2608 		 * do peer_rx_init.
2609 		 */
2610 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2611 		    IS_MLO_DP_LINK_PEER(peer)) {
2612 			if (setup_info && setup_info->is_first_link) {
2613 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
2614 				if (mld_peer)
2615 					dp_peer_rx_init(pdev, mld_peer);
2616 				else
2617 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
2618 			}
2619 		} else {
2620 			dp_peer_rx_init(pdev, peer);
2621 		}
2622 	}
2623 
2624 	if (!IS_MLO_DP_MLD_PEER(peer))
2625 		dp_peer_ppdu_delayed_ba_init(peer);
2626 
2627 fail:
2628 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2629 	return status;
2630 }
2631 
2632 /**
2633  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
2634  * @txrx_soc: cdp soc handle
2635  * @ac: Access category
2636  * @value: timeout value in millisec
2637  *
2638  * Return: void
2639  */
2640 void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
2641 			     uint8_t ac, uint32_t value)
2642 {
2643 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2644 
2645 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
2646 }
2647 
2648 /**
2649  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
2650  * @txrx_soc: cdp soc handle
2651  * @ac: access category
2652  * @value: timeout value in millisec
2653  *
2654  * Return: void
2655  */
2656 void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
2657 			     uint8_t ac, uint32_t *value)
2658 {
2659 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2660 
2661 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
2662 }
2663 
2664 /**
2665  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
2666  * @txrx_soc: cdp soc handle
2667  * @pdev_id: id of physical device object
2668  * @val: reo destination ring index (1 - 4)
2669  *
2670  * Return: QDF_STATUS
2671  */
2672 QDF_STATUS
2673 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
2674 		     enum cdp_host_reo_dest_ring val)
2675 {
2676 	struct dp_pdev *pdev =
2677 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2678 						   pdev_id);
2679 
2680 	if (pdev) {
2681 		pdev->reo_dest = val;
2682 		return QDF_STATUS_SUCCESS;
2683 	}
2684 
2685 	return QDF_STATUS_E_FAILURE;
2686 }
2687 
2688 /**
2689  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
2690  * @txrx_soc: cdp soc handle
2691  * @pdev_id: id of physical device object
2692  *
2693  * Return: reo destination ring index
2694  */
2695 enum cdp_host_reo_dest_ring
2696 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
2697 {
2698 	struct dp_pdev *pdev =
2699 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2700 						   pdev_id);
2701 
2702 	if (pdev)
2703 		return pdev->reo_dest;
2704 	else
2705 		return cdp_host_reo_dest_ring_unknown;
2706 }
2707 
2708 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
2709 	union hal_reo_status *reo_status)
2710 {
2711 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
2712 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
2713 
2714 	if (!dp_check_pdev_exists(soc, pdev)) {
2715 		dp_err_rl("pdev doesn't exist");
2716 		return;
2717 	}
2718 
2719 	if (!qdf_atomic_read(&soc->cmn_init_done))
2720 		return;
2721 
2722 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
2723 		DP_PRINT_STATS("REO stats failure %d",
2724 			       queue_status->header.status);
2725 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
2726 		return;
2727 	}
2728 
2729 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
2730 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
2731 }
2732 
2733 /**
2734  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
2735  * @soc: dp soc.
2736  * @pdev: dp pdev.
2737  *
2738  * Return: None.
2739  */
2740 void
2741 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
2742 {
2743 	uint32_t hw_head;
2744 	uint32_t hw_tail;
2745 	struct dp_srng *srng;
2746 
2747 	if (!soc) {
2748 		dp_err("soc is NULL");
2749 		return;
2750 	}
2751 
2752 	if (!pdev) {
2753 		dp_err("pdev is NULL");
2754 		return;
2755 	}
2756 
2757 	srng = &pdev->soc->wbm_idle_link_ring;
2758 	if (!srng) {
2759 		dp_err("wbm_idle_link_ring srng is NULL");
2760 		return;
2761 	}
2762 
2763 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
2764 			&hw_tail, WBM_IDLE_LINK);
2765 
2766 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
2767 		 hw_head, hw_tail);
2768 }
2769 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * dp_update_soft_irq_limits() - override the per-poll reap limits
 * @soc: DP SOC handle
 * @tx_limit: new tx_comp_loop_pkt_limit value
 * @rx_limit: new rx_reap_loop_pkt_limit value
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}

#else

/* Soft-irq time limiting disabled: limits are not adjustable */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
2786 
2787 /**
2788  * dp_display_srng_info() - Dump the srng HP TP info
2789  * @soc_hdl: CDP Soc handle
2790  *
2791  * This function dumps the SW hp/tp values for the important rings.
2792  * HW hp/tp values are not being dumped, since it can lead to
2793  * READ NOC error when UMAC is in low power state. MCC does not have
2794  * device force wake working yet.
2795  *
2796  * Return: none
2797  */
2798 void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
2799 {
2800 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2801 	hal_soc_handle_t hal_soc = soc->hal_soc;
2802 	uint32_t hp, tp, i;
2803 
2804 	dp_info("SRNG HP-TP data:");
2805 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
2806 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
2807 				&tp, &hp);
2808 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2809 
2810 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
2811 		    INVALID_WBM_RING_NUM)
2812 			continue;
2813 
2814 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
2815 				&tp, &hp);
2816 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2817 	}
2818 
2819 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
2820 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
2821 				&tp, &hp);
2822 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2823 	}
2824 
2825 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
2826 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
2827 
2828 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
2829 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
2830 
2831 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
2832 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
2833 }
2834 
2835 /**
2836  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
2837  * @psoc: dp soc handle
2838  * @pdev_id: id of DP_PDEV handle
2839  * @pcp: pcp value
2840  * @tid: tid value passed by the user
2841  *
2842  * Return: QDF_STATUS_SUCCESS on success
2843  */
2844 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
2845 					 uint8_t pdev_id,
2846 					 uint8_t pcp, uint8_t tid)
2847 {
2848 	struct dp_soc *soc = (struct dp_soc *)psoc;
2849 
2850 	soc->pcp_tid_map[pcp] = tid;
2851 
2852 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
2853 	return QDF_STATUS_SUCCESS;
2854 }
2855 
2856 /**
2857  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
2858  * @soc_hdl: DP soc handle
2859  * @vdev_id: id of DP_VDEV handle
2860  * @pcp: pcp value
2861  * @tid: tid value passed by the user
2862  *
2863  * Return: QDF_STATUS_SUCCESS on success
2864  */
2865 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
2866 					 uint8_t vdev_id,
2867 					 uint8_t pcp, uint8_t tid)
2868 {
2869 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2870 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2871 						     DP_MOD_ID_CDP);
2872 
2873 	if (!vdev)
2874 		return QDF_STATUS_E_FAILURE;
2875 
2876 	vdev->pcp_tid_map[pcp] = tid;
2877 
2878 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2879 	return QDF_STATUS_SUCCESS;
2880 }
2881 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - reap all pending entries from the TX/RX SRNGs
 * @soc_handle: CDP SOC handle
 *
 * Services every interrupt context once with a large budget so any
 * leftover entries in the UMAC/LMAC SRNGs are reaped, then restores the
 * configured soft-irq limits.
 */
void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;
	int cpu = dp_srng_get_cpu();

	/* remember the configured limits so they can be restored below */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		soc->arch_ops.dp_service_srngs(&soc->intr_ctx[i], budget, cpu);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u", val);
}
#endif
2915 
2916 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
2917 /**
2918  * dp_flush_ring_hptp() - Update ring shadow
2919  *			  register HP/TP address when runtime
2920  *                        resume
2921  * @soc: DP soc context
2922  * @hal_srng: srng
2923  *
2924  * Return: None
2925  */
2926 static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
2927 {
2928 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
2929 						 HAL_SRNG_FLUSH_EVENT)) {
2930 		/* Acquire the lock */
2931 		hal_srng_access_start(soc->hal_soc, hal_srng);
2932 
2933 		hal_srng_access_end(soc->hal_soc, hal_srng);
2934 
2935 		hal_srng_set_flush_last_ts(hal_srng);
2936 
2937 		dp_debug("flushed");
2938 	}
2939 }
2940 
2941 void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
2942 {
2943 	 uint8_t i;
2944 
2945 	if (force_flush_tx) {
2946 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2947 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
2948 					   HAL_SRNG_FLUSH_EVENT);
2949 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
2950 		}
2951 
2952 		return;
2953 	}
2954 
2955 	for (i = 0; i < soc->num_tcl_data_rings; i++)
2956 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
2957 
2958 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
2959 }
2960 #endif
2961 
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_flush_tcl_ring() - flush TCL ring hp
 * @pdev: dp pdev
 * @ring_id: TCL ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_ring_handle_t hal_ring_hdl =
			soc->tcl_data_ring[ring_id].hal_srng;
	int ret;

	ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
	if (ret)
		return ret;

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		/* Runtime PM get failed: reap-end the ring access, mark
		 * the ring for a deferred flush and count the attempt.
		 */
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		return ret;
	}

	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}
#else
/* SW latency manager disabled: TCL ring flush is a no-op */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
3000 
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 100

/**
 * dp_rx_hw_stats_cb() - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context
 * @reo_status: reo command response status
 *
 * Invoked once per per-tid REO queue stats response. Accumulates the
 * queue counters into soc->ext_stats and, when the last pending
 * response arrives, signals rx_hw_stats_event and frees
 * soc->rx_hw_stats.
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	bool is_query_timeout;

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	is_query_timeout = soc->rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt if all pending tid stats query is received */
	if (qdf_atomic_dec_and_test(&soc->rx_hw_stats->pending_tid_stats_cnt)) {
		if (!is_query_timeout) {
			qdf_event_set(&soc->rx_hw_stats_event);
			soc->is_last_stats_ctx_init = false;
		}

		qdf_mem_free(soc->rx_hw_stats);
		soc->rx_hw_stats = NULL;
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d",
			queue_status->header.status);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		return;
	}

	/* on timeout the requester has given up; drop the counters */
	if (!is_query_timeout) {
		soc->ext_stats.rx_mpdu_received +=
					queue_status->mpdu_frms_cnt;
		soc->ext_stats.rx_mpdu_missed +=
					queue_status->hole_cnt;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
3047 
3048 /**
3049  * dp_request_rx_hw_stats() - request rx hardware stats
3050  * @soc_hdl: soc handle
3051  * @vdev_id: vdev id
3052  *
3053  * Return: None
3054  */
3055 QDF_STATUS
3056 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
3057 {
3058 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3059 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3060 						     DP_MOD_ID_CDP);
3061 	struct dp_peer *peer = NULL;
3062 	QDF_STATUS status;
3063 	int rx_stats_sent_cnt = 0;
3064 	uint32_t last_rx_mpdu_received;
3065 	uint32_t last_rx_mpdu_missed;
3066 
3067 	if (soc->rx_hw_stats) {
3068 		dp_err_rl("Stats already requested");
3069 		status = QDF_STATUS_E_ALREADY;
3070 		goto out;
3071 	}
3072 
3073 	if (!vdev) {
3074 		dp_err("vdev is null for vdev_id: %u", vdev_id);
3075 		status = QDF_STATUS_E_INVAL;
3076 		goto out;
3077 	}
3078 
3079 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
3080 
3081 	if (!peer) {
3082 		dp_err("Peer is NULL");
3083 		status = QDF_STATUS_E_INVAL;
3084 		goto out;
3085 	}
3086 
3087 	soc->rx_hw_stats = qdf_mem_malloc(sizeof(*soc->rx_hw_stats));
3088 
3089 	if (!soc->rx_hw_stats) {
3090 		dp_err("malloc failed for hw stats structure");
3091 		status = QDF_STATUS_E_INVAL;
3092 		goto out;
3093 	}
3094 
3095 	qdf_event_reset(&soc->rx_hw_stats_event);
3096 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
3097 	/* save the last soc cumulative stats and reset it to 0 */
3098 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
3099 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
3100 	soc->ext_stats.rx_mpdu_received = 0;
3101 	soc->ext_stats.rx_mpdu_missed = 0;
3102 
3103 	dp_debug("HW stats query start");
3104 	rx_stats_sent_cnt =
3105 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, soc->rx_hw_stats);
3106 	if (!rx_stats_sent_cnt) {
3107 		dp_err("no tid stats sent successfully");
3108 		qdf_mem_free(soc->rx_hw_stats);
3109 		soc->rx_hw_stats = NULL;
3110 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3111 		status = QDF_STATUS_E_INVAL;
3112 		goto out;
3113 	}
3114 	qdf_atomic_set(&soc->rx_hw_stats->pending_tid_stats_cnt,
3115 		       rx_stats_sent_cnt);
3116 	soc->rx_hw_stats->is_query_timeout = false;
3117 	soc->is_last_stats_ctx_init = true;
3118 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3119 
3120 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
3121 				       DP_REO_STATUS_STATS_TIMEOUT);
3122 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
3123 
3124 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
3125 	if (status != QDF_STATUS_SUCCESS) {
3126 		if (soc->rx_hw_stats) {
3127 			dp_info("partial rx hw stats event collected with %d",
3128 				qdf_atomic_read(
3129 				  &soc->rx_hw_stats->pending_tid_stats_cnt));
3130 			if (soc->is_last_stats_ctx_init)
3131 				soc->rx_hw_stats->is_query_timeout = true;
3132 		}
3133 
3134 		/*
3135 		 * If query timeout happened, use the last saved stats
3136 		 * for this time query.
3137 		 */
3138 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
3139 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
3140 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
3141 
3142 	}
3143 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3144 
3145 out:
3146 	if (peer)
3147 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3148 	if (vdev)
3149 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3150 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
3151 
3152 	return status;
3153 }
3154 
3155 /**
3156  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
3157  * @soc_hdl: soc handle
3158  *
3159  * Return: None
3160  */
3161 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
3162 {
3163 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3164 
3165 	soc->ext_stats.rx_mpdu_received = 0;
3166 	soc->ext_stats.rx_mpdu_missed = 0;
3167 }
3168 #endif /* WLAN_FEATURE_STATS_EXT */
3169 
3170 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
3171 {
3172 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3173 
3174 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
3175 }
3176 
3177 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
3178 {
3179 	uint32_t i;
3180 
3181 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3182 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
3183 	}
3184 }
3185 
3186 qdf_export_symbol(dp_soc_set_txrx_ring_map);
3187 
3188 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
3189 {
3190 	dp_init_info("DP soc Dump for Target = %d", target_type);
3191 	dp_init_info("ast_override_support = %d da_war_enabled = %d",
3192 		     soc->ast_override_support, soc->da_war_enabled);
3193 
3194 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
3195 }
3196 
3197 /**
3198  * dp_soc_cfg_init() - initialize target specific configuration
3199  *		       during dp_soc_init
3200  * @soc: dp soc handle
3201  */
3202 static void dp_soc_cfg_init(struct dp_soc *soc)
3203 {
3204 	uint32_t target_type;
3205 
3206 	target_type = hal_get_target_type(soc->hal_soc);
3207 	switch (target_type) {
3208 	case TARGET_TYPE_QCA6290:
3209 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
3210 					       REO_DST_RING_SIZE_QCA6290);
3211 		soc->ast_override_support = 1;
3212 		soc->da_war_enabled = false;
3213 		break;
3214 	case TARGET_TYPE_QCA6390:
3215 	case TARGET_TYPE_QCA6490:
3216 	case TARGET_TYPE_QCA6750:
3217 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
3218 					       REO_DST_RING_SIZE_QCA6290);
3219 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
3220 		soc->ast_override_support = 1;
3221 		if (soc->cdp_soc.ol_ops->get_con_mode &&
3222 		    soc->cdp_soc.ol_ops->get_con_mode() ==
3223 		    QDF_GLOBAL_MONITOR_MODE) {
3224 			int int_ctx;
3225 
3226 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
3227 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
3228 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
3229 			}
3230 		}
3231 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
3232 		break;
3233 	case TARGET_TYPE_KIWI:
3234 	case TARGET_TYPE_MANGO:
3235 	case TARGET_TYPE_PEACH:
3236 		soc->ast_override_support = 1;
3237 		soc->per_tid_basize_max_tid = 8;
3238 
3239 		if (soc->cdp_soc.ol_ops->get_con_mode &&
3240 		    soc->cdp_soc.ol_ops->get_con_mode() ==
3241 		    QDF_GLOBAL_MONITOR_MODE) {
3242 			int int_ctx;
3243 
3244 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
3245 			     int_ctx++) {
3246 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
3247 				if (dp_is_monitor_mode_using_poll(soc))
3248 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
3249 			}
3250 		}
3251 
3252 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
3253 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
3254 		break;
3255 	case TARGET_TYPE_QCA8074:
3256 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
3257 		soc->da_war_enabled = true;
3258 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3259 		break;
3260 	case TARGET_TYPE_QCA8074V2:
3261 	case TARGET_TYPE_QCA6018:
3262 	case TARGET_TYPE_QCA9574:
3263 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3264 		soc->ast_override_support = 1;
3265 		soc->per_tid_basize_max_tid = 8;
3266 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3267 		soc->da_war_enabled = false;
3268 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3269 		break;
3270 	case TARGET_TYPE_QCN9000:
3271 		soc->ast_override_support = 1;
3272 		soc->da_war_enabled = false;
3273 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3274 		soc->per_tid_basize_max_tid = 8;
3275 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3276 		soc->lmac_polled_mode = 0;
3277 		soc->wbm_release_desc_rx_sg_support = 1;
3278 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3279 		break;
3280 	case TARGET_TYPE_QCA5018:
3281 	case TARGET_TYPE_QCN6122:
3282 	case TARGET_TYPE_QCN9160:
3283 		soc->ast_override_support = 1;
3284 		soc->da_war_enabled = false;
3285 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3286 		soc->per_tid_basize_max_tid = 8;
3287 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
3288 		soc->disable_mac1_intr = 1;
3289 		soc->disable_mac2_intr = 1;
3290 		soc->wbm_release_desc_rx_sg_support = 1;
3291 		break;
3292 	case TARGET_TYPE_QCN9224:
3293 		soc->umac_reset_supported = true;
3294 		soc->ast_override_support = 1;
3295 		soc->da_war_enabled = false;
3296 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3297 		soc->per_tid_basize_max_tid = 8;
3298 		soc->wbm_release_desc_rx_sg_support = 1;
3299 		soc->rxdma2sw_rings_not_supported = 1;
3300 		soc->wbm_sg_last_msdu_war = 1;
3301 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
3302 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
3303 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3304 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
3305 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
3306 						  CFG_DP_HOST_AST_DB_ENABLE);
3307 		soc->features.wds_ext_ast_override_enable = true;
3308 		break;
3309 	case TARGET_TYPE_QCA5332:
3310 	case TARGET_TYPE_QCN6432:
3311 		soc->umac_reset_supported = true;
3312 		soc->ast_override_support = 1;
3313 		soc->da_war_enabled = false;
3314 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3315 		soc->per_tid_basize_max_tid = 8;
3316 		soc->wbm_release_desc_rx_sg_support = 1;
3317 		soc->rxdma2sw_rings_not_supported = 1;
3318 		soc->wbm_sg_last_msdu_war = 1;
3319 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
3320 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
3321 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
3322 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
3323 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
3324 						  CFG_DP_HOST_AST_DB_ENABLE);
3325 		soc->features.wds_ext_ast_override_enable = true;
3326 		break;
3327 	default:
3328 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
3329 		qdf_assert_always(0);
3330 		break;
3331 	}
3332 	dp_soc_cfg_dump(soc, target_type);
3333 }
3334 
3335 /**
3336  * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
3337  * @soc: Opaque DP SOC handle
3338  *
3339  * Return: none
3340  */
3341 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3342 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
3343 {
3344 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
3345 		soc->mld_mode_ap =
3346 		soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3347 					CDP_CFG_MLD_NETDEV_MODE_AP);
3348 	}
3349 	dp_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
3350 }
3351 #else
static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
{
	/* AP MLD netdev mode is only meaningful for 11BE MLO multi-chip
	 * builds; nothing to fetch here.
	 */
	(void)soc;
}
3356 #endif
3357 
3358 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
3359 /**
3360  * dp_soc_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_soc
3361  * @soc: Datapath soc handle
3362  *
3363  * Return: none
3364  */
3365 static inline
3366 void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
3367 {
3368 	soc->hw_txrx_stats_en =
3369 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
3370 }
3371 #else
static inline
void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
{
	/* HW vdev stats offload not compiled in; use the host stats path */
	soc->hw_txrx_stats_en = 0;
}
3377 #endif
3378 
3379 /**
3380  * dp_soc_init() - Initialize txrx SOC
3381  * @soc: Opaque DP SOC handle
3382  * @htc_handle: Opaque HTC handle
3383  * @hif_handle: Opaque HIF handle
3384  *
3385  * Return: DP SOC handle on success, NULL on failure
3386  */
3387 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
3388 		  struct hif_opaque_softc *hif_handle)
3389 {
3390 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
3391 	bool is_monitor_mode = false;
3392 	uint8_t i;
3393 	int num_dp_msi;
3394 	bool ppeds_attached = false;
3395 
3396 	htt_soc = htt_soc_attach(soc, htc_handle);
3397 	if (!htt_soc)
3398 		goto fail1;
3399 
3400 	soc->htt_handle = htt_soc;
3401 
3402 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
3403 		goto fail2;
3404 
3405 	htt_set_htc_handle(htt_soc, htc_handle);
3406 
3407 	dp_soc_cfg_init(soc);
3408 
3409 	dp_monitor_soc_cfg_init(soc);
3410 	/* Reset/Initialize wbm sg list and flags */
3411 	dp_rx_wbm_sg_list_reset(soc);
3412 
3413 	/* Note: Any SRNG ring initialization should happen only after
3414 	 * Interrupt mode is set and followed by filling up the
3415 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
3416 	 */
3417 	dp_soc_set_interrupt_mode(soc);
3418 	if (soc->cdp_soc.ol_ops->get_con_mode &&
3419 	    soc->cdp_soc.ol_ops->get_con_mode() ==
3420 	    QDF_GLOBAL_MONITOR_MODE) {
3421 		is_monitor_mode = true;
3422 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
3423 	} else {
3424 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
3425 	}
3426 
3427 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
3428 	if (num_dp_msi < 0) {
3429 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
3430 		goto fail3;
3431 	}
3432 
3433 	if (soc->arch_ops.ppeds_handle_attached)
3434 		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
3435 
3436 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
3437 				     soc->intr_mode, is_monitor_mode,
3438 				     ppeds_attached,
3439 				     soc->umac_reset_supported);
3440 
3441 	/* initialize WBM_IDLE_LINK ring */
3442 	if (dp_hw_link_desc_ring_init(soc)) {
3443 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
3444 		goto fail3;
3445 	}
3446 
3447 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
3448 
3449 	if (dp_soc_srng_init(soc)) {
3450 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
3451 		goto fail4;
3452 	}
3453 
3454 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
3455 			       htt_get_htc_handle(htt_soc),
3456 			       soc->hal_soc, soc->osdev) == NULL)
3457 		goto fail5;
3458 
3459 	/* Initialize descriptors in TCL Rings */
3460 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3461 		hal_tx_init_data_ring(soc->hal_soc,
3462 				      soc->tcl_data_ring[i].hal_srng);
3463 	}
3464 
3465 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
3466 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
3467 		goto fail6;
3468 	}
3469 
3470 	if (soc->arch_ops.txrx_soc_ppeds_start) {
3471 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
3472 			dp_init_err("%pK: ppeds start failed", soc);
3473 			goto fail7;
3474 		}
3475 	}
3476 
3477 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
3478 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
3479 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3480 	wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
3481 			   cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
3482 #endif
3483 	soc->cce_disable = false;
3484 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
3485 
3486 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
3487 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
3488 	qdf_spinlock_create(&soc->vdev_map_lock);
3489 	qdf_atomic_init(&soc->num_tx_outstanding);
3490 	qdf_atomic_init(&soc->num_tx_exception);
3491 	soc->num_tx_allowed =
3492 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
3493 	soc->num_tx_spl_allowed =
3494 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
3495 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
3496 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
3497 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3498 				CDP_CFG_MAX_PEER_ID);
3499 
3500 		if (ret != -EINVAL)
3501 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
3502 
3503 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3504 				CDP_CFG_CCE_DISABLE);
3505 		if (ret == 1)
3506 			soc->cce_disable = true;
3507 	}
3508 
3509 	/*
3510 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
3511 	 * and IPQ5018 WMAC2 is not there in these platforms.
3512 	 */
3513 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
3514 	    soc->disable_mac2_intr)
3515 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
3516 
3517 	/*
3518 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
3519 	 * WMAC1 is not there in this platform.
3520 	 */
3521 	if (soc->disable_mac1_intr)
3522 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
3523 
3524 	/* setup the global rx defrag waitlist */
3525 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3526 	soc->rx.defrag.timeout_ms =
3527 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
3528 	soc->rx.defrag.next_flush_ms = 0;
3529 	soc->rx.flags.defrag_timeout_check =
3530 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
3531 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3532 
3533 	dp_monitor_soc_init(soc);
3534 
3535 	qdf_atomic_set(&soc->cmn_init_done, 1);
3536 
3537 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3538 
3539 	qdf_spinlock_create(&soc->ast_lock);
3540 	dp_peer_mec_spinlock_create(soc);
3541 
3542 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
3543 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
3544 	INIT_RX_HW_STATS_LOCK(soc);
3545 
3546 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
3547 	/* fill the tx/rx cpu ring map*/
3548 	dp_soc_set_txrx_ring_map(soc);
3549 
3550 	TAILQ_INIT(&soc->inactive_peer_list);
3551 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
3552 	TAILQ_INIT(&soc->inactive_vdev_list);
3553 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
3554 	qdf_spinlock_create(&soc->htt_stats.lock);
3555 	/* initialize work queue for stats processing */
3556 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3557 
3558 	dp_reo_desc_deferred_freelist_create(soc);
3559 
3560 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
3561 		qdf_dma_mem_stats_read(),
3562 		qdf_heap_mem_stats_read(),
3563 		qdf_skb_total_mem_stats_read());
3564 
3565 	soc->vdev_stats_id_map = 0;
3566 
3567 	dp_soc_hw_txrx_stats_init(soc);
3568 
3569 	dp_soc_get_ap_mld_mode(soc);
3570 
3571 	return soc;
3572 fail7:
3573 	dp_soc_tx_desc_sw_pools_deinit(soc);
3574 fail6:
3575 	htt_soc_htc_dealloc(soc->htt_handle);
3576 fail5:
3577 	dp_soc_srng_deinit(soc);
3578 fail4:
3579 	dp_hw_link_desc_ring_deinit(soc);
3580 fail3:
3581 	htt_htc_pkt_pool_free(htt_soc);
3582 fail2:
3583 	htt_soc_detach(htt_soc);
3584 fail1:
3585 	return NULL;
3586 }
3587 
3588 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
3589 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
3590 {
3591 	QDF_STATUS status;
3592 
3593 	if (soc->init_tcl_cmd_cred_ring) {
3594 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
3595 				       TCL_CMD_CREDIT, 0, 0);
3596 		if (QDF_IS_STATUS_ERROR(status))
3597 			return status;
3598 
3599 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3600 				  soc->tcl_cmd_credit_ring.alloc_size,
3601 				  soc->ctrl_psoc,
3602 				  WLAN_MD_DP_SRNG_TCL_CMD,
3603 				  "wbm_desc_rel_ring");
3604 	}
3605 
3606 	return QDF_STATUS_SUCCESS;
3607 }
3608 
3609 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
3610 {
3611 	if (soc->init_tcl_cmd_cred_ring) {
3612 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3613 				     soc->tcl_cmd_credit_ring.alloc_size,
3614 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
3615 				     "wbm_desc_rel_ring");
3616 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
3617 			       TCL_CMD_CREDIT, 0);
3618 	}
3619 }
3620 
3621 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
3622 {
3623 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3624 	uint32_t entries;
3625 	QDF_STATUS status;
3626 
3627 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
3628 	if (soc->init_tcl_cmd_cred_ring) {
3629 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
3630 				       TCL_CMD_CREDIT, entries, 0);
3631 		if (QDF_IS_STATUS_ERROR(status))
3632 			return status;
3633 	}
3634 
3635 	return QDF_STATUS_SUCCESS;
3636 }
3637 
3638 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
3639 {
3640 	if (soc->init_tcl_cmd_cred_ring)
3641 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
3642 }
3643 
3644 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
3645 {
3646 	if (soc->init_tcl_cmd_cred_ring)
3647 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
3648 					    soc->tcl_cmd_credit_ring.hal_srng);
3649 }
3650 #else
/* Stubs for builds with WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG: the TCL
 * command/credit ring is not used, so alloc/init succeed trivially and
 * teardown is a no-op.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
3672 #endif
3673 
3674 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
3675 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
3676 {
3677 	QDF_STATUS status;
3678 
3679 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
3680 	if (QDF_IS_STATUS_ERROR(status))
3681 		return status;
3682 
3683 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
3684 			  soc->tcl_status_ring.alloc_size,
3685 			  soc->ctrl_psoc,
3686 			  WLAN_MD_DP_SRNG_TCL_STATUS,
3687 			  "wbm_desc_rel_ring");
3688 
3689 	return QDF_STATUS_SUCCESS;
3690 }
3691 
3692 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
3693 {
3694 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
3695 			     soc->tcl_status_ring.alloc_size,
3696 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
3697 			     "wbm_desc_rel_ring");
3698 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3699 }
3700 
3701 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
3702 {
3703 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3704 	uint32_t entries;
3705 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3706 
3707 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
3708 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
3709 			       TCL_STATUS, entries, 0);
3710 
3711 	return status;
3712 }
3713 
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	/* Release ring memory allocated by dp_soc_tcl_status_srng_alloc() */
	dp_srng_free(soc, &soc->tcl_status_ring);
}
3718 #else
/* Stubs for builds with WLAN_DP_DISABLE_TCL_STATUS_SRNG: the TCL status
 * ring is not used, so alloc/init succeed trivially and teardown is a
 * no-op.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
3736 #endif
3737 
3738 /**
3739  * dp_soc_srng_deinit() - de-initialize soc srng rings
3740  * @soc: Datapath soc handle
3741  *
3742  */
3743 void dp_soc_srng_deinit(struct dp_soc *soc)
3744 {
3745 	uint32_t i;
3746 
3747 	if (soc->arch_ops.txrx_soc_srng_deinit)
3748 		soc->arch_ops.txrx_soc_srng_deinit(soc);
3749 
3750 	/* Free the ring memories */
3751 	/* Common rings */
3752 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3753 			     soc->wbm_desc_rel_ring.alloc_size,
3754 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
3755 			     "wbm_desc_rel_ring");
3756 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3757 	dp_ssr_dump_srng_unregister("wbm_desc_rel_ring", -1);
3758 
3759 	/* Tx data rings */
3760 	for (i = 0; i < soc->num_tcl_data_rings; i++)
3761 		dp_deinit_tx_pair_by_index(soc, i);
3762 
3763 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3764 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
3765 		dp_ipa_deinit_alt_tx_ring(soc);
3766 	}
3767 
3768 	/* TCL command and status rings */
3769 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
3770 	dp_soc_tcl_status_srng_deinit(soc);
3771 
3772 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3773 		/* TODO: Get number of rings and ring sizes
3774 		 * from wlan_cfg
3775 		 */
3776 		dp_ssr_dump_srng_unregister("reo_dest_ring", i);
3777 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
3778 				     soc->reo_dest_ring[i].alloc_size,
3779 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
3780 				     "reo_dest_ring");
3781 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
3782 	}
3783 
3784 	dp_ssr_dump_srng_unregister("reo_reinject_ring", -1);
3785 	/* REO reinjection ring */
3786 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
3787 			     soc->reo_reinject_ring.alloc_size,
3788 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
3789 			     "reo_reinject_ring");
3790 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3791 
3792 	dp_ssr_dump_srng_unregister("rx_rel_ring", -1);
3793 	/* Rx release ring */
3794 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
3795 			     soc->rx_rel_ring.alloc_size,
3796 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
3797 			     "reo_release_ring");
3798 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3799 
3800 	/* Rx exception ring */
3801 	/* TODO: Better to store ring_type and ring_num in
3802 	 * dp_srng during setup
3803 	 */
3804 	dp_ssr_dump_srng_unregister("reo_exception_ring", -1);
3805 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
3806 			     soc->reo_exception_ring.alloc_size,
3807 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
3808 			     "reo_exception_ring");
3809 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3810 
3811 	/* REO command and status rings */
3812 	dp_ssr_dump_srng_unregister("reo_cmd_ring", -1);
3813 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
3814 			     soc->reo_cmd_ring.alloc_size,
3815 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
3816 			     "reo_cmd_ring");
3817 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3818 	dp_ssr_dump_srng_unregister("reo_status_ring", -1);
3819 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
3820 			     soc->reo_status_ring.alloc_size,
3821 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
3822 			     "reo_status_ring");
3823 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3824 }
3825 
3826 /**
3827  * dp_soc_srng_init() - Initialize soc level srng rings
3828  * @soc: Datapath soc handle
3829  *
3830  * Return: QDF_STATUS_SUCCESS on success
3831  *	   QDF_STATUS_E_FAILURE on failure
3832  */
3833 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
3834 {
3835 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3836 	uint8_t i;
3837 	uint8_t wbm2_sw_rx_rel_ring_id;
3838 
3839 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3840 
3841 	dp_enable_verbose_debug(soc);
3842 
3843 	/* WBM descriptor release ring */
3844 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
3845 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
3846 		goto fail1;
3847 	}
3848 	dp_ssr_dump_srng_register("wbm_desc_rel_ring",
3849 				  &soc->wbm_desc_rel_ring, -1);
3850 
3851 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3852 			  soc->wbm_desc_rel_ring.alloc_size,
3853 			  soc->ctrl_psoc,
3854 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
3855 			  "wbm_desc_rel_ring");
3856 
3857 	/* TCL command and status rings */
3858 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
3859 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
3860 		goto fail1;
3861 	}
3862 
3863 	if (dp_soc_tcl_status_srng_init(soc)) {
3864 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
3865 		goto fail1;
3866 	}
3867 
3868 	/* REO reinjection ring */
3869 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
3870 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
3871 		goto fail1;
3872 	}
3873 	dp_ssr_dump_srng_register("reo_reinject_ring",
3874 				  &soc->reo_reinject_ring, -1);
3875 
3876 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
3877 			  soc->reo_reinject_ring.alloc_size,
3878 			  soc->ctrl_psoc,
3879 			  WLAN_MD_DP_SRNG_REO_REINJECT,
3880 			  "reo_reinject_ring");
3881 
3882 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
3883 	/* Rx release ring */
3884 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
3885 			 wbm2_sw_rx_rel_ring_id, 0)) {
3886 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
3887 		goto fail1;
3888 	}
3889 	dp_ssr_dump_srng_register("rx_rel_ring", &soc->rx_rel_ring, -1);
3890 
3891 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
3892 			  soc->rx_rel_ring.alloc_size,
3893 			  soc->ctrl_psoc,
3894 			  WLAN_MD_DP_SRNG_RX_REL,
3895 			  "reo_release_ring");
3896 
3897 	/* Rx exception ring */
3898 	if (dp_srng_init(soc, &soc->reo_exception_ring,
3899 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
3900 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
3901 		goto fail1;
3902 	}
3903 	dp_ssr_dump_srng_register("reo_exception_ring",
3904 				  &soc->reo_exception_ring, -1);
3905 
3906 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
3907 			  soc->reo_exception_ring.alloc_size,
3908 			  soc->ctrl_psoc,
3909 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
3910 			  "reo_exception_ring");
3911 
3912 	/* REO command and status rings */
3913 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
3914 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
3915 		goto fail1;
3916 	}
3917 	dp_ssr_dump_srng_register("reo_cmd_ring", &soc->reo_cmd_ring, -1);
3918 
3919 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
3920 			  soc->reo_cmd_ring.alloc_size,
3921 			  soc->ctrl_psoc,
3922 			  WLAN_MD_DP_SRNG_REO_CMD,
3923 			  "reo_cmd_ring");
3924 
3925 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
3926 	TAILQ_INIT(&soc->rx.reo_cmd_list);
3927 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
3928 
3929 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
3930 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
3931 		goto fail1;
3932 	}
3933 	dp_ssr_dump_srng_register("reo_status_ring", &soc->reo_status_ring, -1);
3934 
3935 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
3936 			  soc->reo_status_ring.alloc_size,
3937 			  soc->ctrl_psoc,
3938 			  WLAN_MD_DP_SRNG_REO_STATUS,
3939 			  "reo_status_ring");
3940 
3941 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3942 		if (dp_init_tx_ring_pair_by_index(soc, i))
3943 			goto fail1;
3944 	}
3945 
3946 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3947 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
3948 			goto fail1;
3949 
3950 		if (dp_ipa_init_alt_tx_ring(soc))
3951 			goto fail1;
3952 	}
3953 
3954 	dp_create_ext_stats_event(soc);
3955 
3956 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3957 		/* Initialize REO destination ring */
3958 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
3959 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
3960 			goto fail1;
3961 		}
3962 
3963 		dp_ssr_dump_srng_register("reo_dest_ring",
3964 					  &soc->reo_dest_ring[i], i);
3965 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
3966 				  soc->reo_dest_ring[i].alloc_size,
3967 				  soc->ctrl_psoc,
3968 				  WLAN_MD_DP_SRNG_REO_DEST,
3969 				  "reo_dest_ring");
3970 	}
3971 
3972 	if (soc->arch_ops.txrx_soc_srng_init) {
3973 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
3974 			dp_init_err("%pK: dp_srng_init failed for arch rings",
3975 				    soc);
3976 			goto fail1;
3977 		}
3978 	}
3979 
3980 	return QDF_STATUS_SUCCESS;
3981 fail1:
3982 	/*
3983 	 * Cleanup will be done as part of soc_detach, which will
3984 	 * be called on pdev attach failure
3985 	 */
3986 	dp_soc_srng_deinit(soc);
3987 	return QDF_STATUS_E_FAILURE;
3988 }
3989 
3990 /**
3991  * dp_soc_srng_free() - free soc level srng rings
3992  * @soc: Datapath soc handle
3993  *
3994  */
3995 void dp_soc_srng_free(struct dp_soc *soc)
3996 {
3997 	uint32_t i;
3998 
3999 	if (soc->arch_ops.txrx_soc_srng_free)
4000 		soc->arch_ops.txrx_soc_srng_free(soc);
4001 
4002 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
4003 
4004 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4005 		dp_free_tx_ring_pair_by_index(soc, i);
4006 
4007 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
4008 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4009 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4010 		dp_ipa_free_alt_tx_ring(soc);
4011 	}
4012 
4013 	dp_soc_tcl_cmd_cred_srng_free(soc);
4014 	dp_soc_tcl_status_srng_free(soc);
4015 
4016 	for (i = 0; i < soc->num_reo_dest_rings; i++)
4017 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
4018 
4019 	dp_srng_free(soc, &soc->reo_reinject_ring);
4020 	dp_srng_free(soc, &soc->rx_rel_ring);
4021 
4022 	dp_srng_free(soc, &soc->reo_exception_ring);
4023 
4024 	dp_srng_free(soc, &soc->reo_cmd_ring);
4025 	dp_srng_free(soc, &soc->reo_status_ring);
4026 }
4027 
4028 /**
4029  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
4030  * @soc: Datapath soc handle
4031  *
4032  * Return: QDF_STATUS_SUCCESS on success
4033  *	   QDF_STATUS_E_NOMEM on failure
4034  */
4035 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
4036 {
4037 	uint32_t entries;
4038 	uint32_t i;
4039 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4040 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
4041 	uint32_t reo_dst_ring_size;
4042 
4043 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4044 
4045 	/* sw2wbm link descriptor release ring */
4046 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
4047 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
4048 			  entries, 0)) {
4049 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
4050 		goto fail1;
4051 	}
4052 
4053 	/* TCL command and status rings */
4054 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
4055 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
4056 		goto fail1;
4057 	}
4058 
4059 	if (dp_soc_tcl_status_srng_alloc(soc)) {
4060 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
4061 		goto fail1;
4062 	}
4063 
4064 	/* REO reinjection ring */
4065 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
4066 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
4067 			  entries, 0)) {
4068 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
4069 		goto fail1;
4070 	}
4071 
4072 	/* Rx release ring */
4073 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
4074 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4075 			  entries, 0)) {
4076 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
4077 		goto fail1;
4078 	}
4079 
4080 	/* Rx exception ring */
4081 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
4082 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
4083 			  entries, 0)) {
4084 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
4085 		goto fail1;
4086 	}
4087 
4088 	/* REO command and status rings */
4089 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
4090 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
4091 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
4092 		goto fail1;
4093 	}
4094 
4095 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
4096 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
4097 			  entries, 0)) {
4098 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
4099 		goto fail1;
4100 	}
4101 
4102 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
4103 
4104 	/* Disable cached desc if NSS offload is enabled */
4105 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4106 		cached = 0;
4107 
4108 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4109 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
4110 			goto fail1;
4111 	}
4112 
4113 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
4114 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4115 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4116 			goto fail1;
4117 
4118 		if (dp_ipa_alloc_alt_tx_ring(soc))
4119 			goto fail1;
4120 	}
4121 
4122 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4123 		/* Setup REO destination ring */
4124 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
4125 				  reo_dst_ring_size, cached)) {
4126 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
4127 			goto fail1;
4128 		}
4129 	}
4130 
4131 	if (soc->arch_ops.txrx_soc_srng_alloc) {
4132 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
4133 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
4134 				    soc);
4135 			goto fail1;
4136 		}
4137 	}
4138 
4139 	return QDF_STATUS_SUCCESS;
4140 
4141 fail1:
4142 	dp_soc_srng_free(soc);
4143 	return QDF_STATUS_E_NOMEM;
4144 }
4145 
4146 /**
4147  * dp_soc_cfg_attach() - set target specific configuration in
4148  *			 dp soc cfg.
4149  * @soc: dp soc handle
4150  */
4151 void dp_soc_cfg_attach(struct dp_soc *soc)
4152 {
4153 	int target_type;
4154 	int nss_cfg = 0;
4155 
4156 	target_type = hal_get_target_type(soc->hal_soc);
4157 	switch (target_type) {
4158 	case TARGET_TYPE_QCA6290:
4159 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4160 					       REO_DST_RING_SIZE_QCA6290);
4161 		break;
4162 	case TARGET_TYPE_QCA6390:
4163 	case TARGET_TYPE_QCA6490:
4164 	case TARGET_TYPE_QCA6750:
4165 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4166 					       REO_DST_RING_SIZE_QCA6290);
4167 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4168 		break;
4169 	case TARGET_TYPE_KIWI:
4170 	case TARGET_TYPE_MANGO:
4171 	case TARGET_TYPE_PEACH:
4172 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4173 		break;
4174 	case TARGET_TYPE_QCA8074:
4175 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4176 		break;
4177 	case TARGET_TYPE_QCA8074V2:
4178 	case TARGET_TYPE_QCA6018:
4179 	case TARGET_TYPE_QCA9574:
4180 	case TARGET_TYPE_QCN6122:
4181 	case TARGET_TYPE_QCA5018:
4182 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4183 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4184 		break;
4185 	case TARGET_TYPE_QCN9160:
4186 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4187 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4188 		break;
4189 	case TARGET_TYPE_QCN9000:
4190 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4191 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4192 		break;
4193 	case TARGET_TYPE_QCN9224:
4194 	case TARGET_TYPE_QCA5332:
4195 	case TARGET_TYPE_QCN6432:
4196 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4197 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4198 		break;
4199 	default:
4200 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4201 		qdf_assert_always(0);
4202 		break;
4203 	}
4204 
4205 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
4206 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
4207 
4208 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
4209 
4210 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
4211 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
4212 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
4213 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
4214 		wlan_cfg_set_num_tx_spl_desc(soc->wlan_cfg_ctx, 0);
4215 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
4216 		soc->init_tcl_cmd_cred_ring = false;
4217 		soc->num_tcl_data_rings =
4218 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
4219 		soc->num_reo_dest_rings =
4220 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
4221 
4222 	} else {
4223 		soc->init_tcl_cmd_cred_ring = true;
4224 		soc->num_tx_comp_rings =
4225 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
4226 		soc->num_tcl_data_rings =
4227 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
4228 		soc->num_reo_dest_rings =
4229 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4230 	}
4231 
4232 }
4233 
4234 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
4235 {
4236 	struct dp_soc *soc = pdev->soc;
4237 
4238 	switch (pdev->pdev_id) {
4239 	case 0:
4240 		pdev->reo_dest =
4241 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
4242 		break;
4243 
4244 	case 1:
4245 		pdev->reo_dest =
4246 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
4247 		break;
4248 
4249 	case 2:
4250 		pdev->reo_dest =
4251 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
4252 		break;
4253 
4254 	default:
4255 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
4256 			    soc, pdev->pdev_id);
4257 		break;
4258 	}
4259 }
4260 
4261 #ifdef WLAN_SUPPORT_DPDK
4262 void dp_soc_reset_dpdk_intr_mask(struct dp_soc *soc)
4263 {
4264 	uint8_t j;
4265 	uint8_t *grp_mask = NULL;
4266 	int group_number, mask, num_ring;
4267 
4268 	/* number of tx ring */
4269 	num_ring = soc->num_tcl_data_rings;
4270 
4271 	/*
4272 	 * group mask for tx completion  ring.
4273 	 */
4274 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4275 
4276 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4277 		/*
4278 		 * Group number corresponding to tx offloaded ring.
4279 		 */
4280 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4281 		if (group_number < 0) {
4282 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4283 				      soc, WBM2SW_RELEASE, j);
4284 			continue;
4285 		}
4286 
4287 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx,
4288 						 group_number);
4289 
4290 		/* reset the tx mask for offloaded ring */
4291 		mask &= (~(1 << j));
4292 
4293 		/*
4294 		 * reset the interrupt mask for offloaded ring.
4295 		 */
4296 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx,
4297 					  group_number, mask);
4298 	}
4299 
4300 	/* number of rx rings */
4301 	num_ring = soc->num_reo_dest_rings;
4302 
4303 	/*
4304 	 * group mask for reo destination ring.
4305 	 */
4306 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4307 
4308 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4309 		/*
4310 		 * Group number corresponding to rx offloaded ring.
4311 		 */
4312 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4313 		if (group_number < 0) {
4314 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4315 				      soc, REO_DST, j);
4316 			continue;
4317 		}
4318 
4319 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx,
4320 						  group_number);
4321 
4322 		/* reset the interrupt mask for offloaded ring */
4323 		mask &= (~(1 << j));
4324 
4325 		/*
4326 		 * set the interrupt mask to zero for rx offloaded radio.
4327 		 */
4328 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx,
4329 					  group_number, mask);
4330 	}
4331 
4332 	/*
4333 	 * group mask for Rx buffer refill ring
4334 	 */
4335 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4336 
4337 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4338 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4339 
4340 		/*
4341 		 * Group number corresponding to rx offloaded ring.
4342 		 */
4343 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4344 		if (group_number < 0) {
4345 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4346 				      soc, REO_DST, lmac_id);
4347 			continue;
4348 		}
4349 
4350 		/* set the interrupt mask for offloaded ring */
4351 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4352 							  group_number);
4353 		mask &= (~(1 << lmac_id));
4354 
4355 		/*
4356 		 * set the interrupt mask to zero for rx offloaded radio.
4357 		 */
4358 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4359 						  group_number, mask);
4360 	}
4361 
4362 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4363 
4364 	for (j = 0; j < num_ring; j++) {
4365 		/*
4366 		 * Group number corresponding to rx err ring.
4367 		 */
4368 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4369 		if (group_number < 0) {
4370 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4371 				      soc, REO_EXCEPTION, j);
4372 			continue;
4373 		}
4374 
4375 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4376 					      group_number, 0);
4377 	}
4378 }
4379 #endif
4380