xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c (revision e11f459adedbe4ff0ee2a3365a57986d1921df18)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 
60 #ifdef WLAN_FEATURE_STATS_EXT
61 #define INIT_RX_HW_STATS_LOCK(_soc) \
62 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
63 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
64 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
65 #else
66 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
67 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
68 #endif
69 
70 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
71 #define TXCOMP_RING4_NUM 3
72 #else
73 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
74 #endif
75 
76 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
77 						uint8_t index);
78 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
79 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
80 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
81 						 uint8_t index);
82 
/* default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Each group of 8 consecutive DSCP codepoints maps to one TID
 * (i.e. TID = DSCP >> 3), as laid out below:
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
105 
/* default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity mapping: PCP value n maps directly to TID n.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
121 
/* dp_cpu_ring_map - per-NSS-configuration ring map, indexed by
 * [nss config][interrupt context].  Each entry selects which ring a given
 * interrupt context services for that NSS offload configuration.
 * NOTE(review): row ordering is presumably tied to the NSS cfg enum used
 * to index DP_NSS_CPU_RING_MAP_MAX — confirm against that enum's order.
 * The extra row under WLAN_TX_PKT_CAPTURE_ENH serves tx capture mode.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
135 
136 /**
137  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
138  * @soc: DP soc handle
139  * @ring_type: ring type
140  * @ring_num: ring_num
141  *
142  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
143  */
144 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
145 					    enum hal_ring_type ring_type,
146 					    int ring_num)
147 {
148 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
149 	uint8_t status = 0;
150 
151 	switch (ring_type) {
152 	case WBM2SW_RELEASE:
153 	case REO_DST:
154 	case RXDMA_BUF:
155 	case REO_EXCEPTION:
156 		status = ((nss_config) & (1 << ring_num));
157 		break;
158 	default:
159 		break;
160 	}
161 
162 	return status;
163 }
164 
165 /* MCL specific functions */
166 #if defined(DP_CON_MON)
167 
168 #ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * When DP_CON_MON_MSI_ENABLED is set, this variant returns the configured
 * rx monitor ring mask for the interrupt context, so monitor rings are
 * serviced from the regular interrupt path rather than only via the
 * polled timer context used by the non-MSI variant below.
 *
 * (NOTE(review): the previous comment here claimed this returns 0; it was
 * copy-pasted from the non-MSI variant and did not match the code.)
 *
 * Return: rx mon ring mask for @intr_ctx_num from the soc wlan cfg context
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
189 #else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 since, in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It
 * would be done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
210 #endif
211 
212 #else
213 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Non DP_CON_MON builds: monitor rings are serviced from the regular
 * interrupt context, so the configured mask is returned as-is.
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc,
						int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
227 
228 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
229 {
230 	int i;
231 
232 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
233 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
234 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
235 	}
236 }
237 
238 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
239 
/**
 * dp_service_lmac_rings() - timer callback servicing per-LMAC rings
 * @arg: opaque pointer to the dp_soc
 *
 * For every LMAC hw ring: runs monitor processing, drains the rxdma error
 * ring from each interrupt context, and replenishes the rx refill buffer
 * ring when that ring is not NSS-offloaded.  Re-arms the lmac reap timer
 * at the end, so the reap keeps re-scheduling itself.
 */
void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip LMAC ids with no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* The rxdma error ring may be mapped to any intr context;
		 * walk them all.
		 */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Host refills the rx buffer ring only when it is not
		 * offloaded to NSS.
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	/* Self re-arm: schedule the next reap pass */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
278 
279 #endif
280 
281 /**
282  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
283  * @ring_num: ring num of the ring being queried
284  * @grp_mask: the grp_mask array for the ring type in question.
285  *
286  * The grp_mask array is indexed by group number and the bit fields correspond
287  * to ring numbers.  We are finding which interrupt group a ring belongs to.
288  *
289  * Return: the index in the grp_mask array with the ring number.
290  * -QDF_STATUS_E_NOENT if no entry is found
291  */
292 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
293 {
294 	int ext_group_num;
295 	uint8_t mask = 1 << ring_num;
296 
297 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
298 	     ext_group_num++) {
299 		if (mask & grp_mask[ext_group_num])
300 			return ext_group_num;
301 	}
302 
303 	return -QDF_STATUS_E_NOENT;
304 }
305 
306 /**
307  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
308  * @soc: dp_soc
309  * @msi_group_number: MSI group number.
310  * @msi_data_count: MSI data count.
311  *
312  * Return: true if msi_group_number is invalid.
313  */
314 static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
315 					   int msi_group_number,
316 					   int msi_data_count)
317 {
318 	if (soc && soc->osdev && soc->osdev->dev &&
319 	    pld_is_one_msi(soc->osdev->dev))
320 		return false;
321 
322 	return msi_group_number > msi_data_count;
323 }
324 
325 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
326 /**
327  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
328  *				rx_near_full_grp1 mask
329  * @soc: Datapath SoC Handle
330  * @ring_num: REO ring number
331  *
332  * Return: 1 if the ring_num belongs to reo_nf_grp1,
333  *	   0, otherwise.
334  */
335 static inline int
336 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
337 {
338 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
339 }
340 
341 /**
342  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
343  *				rx_near_full_grp2 mask
344  * @soc: Datapath SoC Handle
345  * @ring_num: REO ring number
346  *
347  * Return: 1 if the ring_num belongs to reo_nf_grp2,
348  *	   0, otherwise.
349  */
350 static inline int
351 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
352 {
353 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
354 }
355 
356 /**
357  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
358  *				ring type and number
359  * @soc: Datapath SoC handle
360  * @ring_type: SRNG type
361  * @ring_num: ring num
362  *
363  * Return: near-full irq mask pointer
364  */
365 static inline
366 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
367 					enum hal_ring_type ring_type,
368 					int ring_num)
369 {
370 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
371 	uint8_t wbm2_sw_rx_rel_ring_id;
372 	uint8_t *nf_irq_mask = NULL;
373 
374 	switch (ring_type) {
375 	case WBM2SW_RELEASE:
376 		wbm2_sw_rx_rel_ring_id =
377 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
378 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
379 			nf_irq_mask = &soc->wlan_cfg_ctx->
380 					int_tx_ring_near_full_irq_mask[0];
381 		}
382 		break;
383 	case REO_DST:
384 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
385 			nf_irq_mask =
386 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
387 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
388 			nf_irq_mask =
389 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
390 		else
391 			qdf_assert(0);
392 		break;
393 	default:
394 		break;
395 	}
396 
397 	return nf_irq_mask;
398 }
399 
400 /**
401  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
402  * @soc: Datapath SoC handle
403  * @ring_params: srng params handle
404  * @msi2_addr: MSI2 addr to be set for the SRNG
405  * @msi2_data: MSI2 data to be set for the SRNG
406  *
407  * Return: None
408  */
409 static inline
410 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
411 				  struct hal_srng_params *ring_params,
412 				  qdf_dma_addr_t msi2_addr,
413 				  uint32_t msi2_data)
414 {
415 	ring_params->msi2_addr = msi2_addr;
416 	ring_params->msi2_data = msi2_data;
417 }
418 
/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number; negative means the ring's
 *                  near-full IRQ is not part of any ext_group and msi2 is
 *                  left unconfigured
 *
 * Return: None
 */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	/* No MSI assignment from the platform driver: nothing to set up */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Group number beyond the MSI count means two groups will share one
	 * MSI vector; flag it loudly but continue with the modulo mapping.
	 */
	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	/* 64-bit MSI address is delivered as two 32-bit halves */
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
467 
468 /* Percentage of ring entries considered as nearly full */
469 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
470 /* Percentage of ring entries considered as critically full */
471 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
472 /* Percentage of ring entries considered as safe threshold */
473 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
474 
475 /**
476  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
477  *			near full irq
478  * @soc: Datapath SoC handle
479  * @ring_params: ring params for SRNG
480  * @ring_type: ring type
481  */
482 static inline void
483 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
484 					  struct hal_srng_params *ring_params,
485 					  int ring_type)
486 {
487 	if (ring_params->nf_irq_support) {
488 		ring_params->high_thresh = (ring_params->num_entries *
489 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
490 		ring_params->crit_thresh = (ring_params->num_entries *
491 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
492 		ring_params->safe_thresh = (ring_params->num_entries *
493 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
494 	}
495 }
496 
497 /**
498  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
499  *			structure from the ring params
500  * @soc: Datapath SoC handle
501  * @srng: SRNG handle
502  * @ring_params: ring params for a SRNG
503  *
504  * Return: None
505  */
506 static inline void
507 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
508 			  struct hal_srng_params *ring_params)
509 {
510 	srng->crit_thresh = ring_params->crit_thresh;
511 	srng->safe_thresh = ring_params->safe_thresh;
512 }
513 
514 #else
/* Stub: near-full IRQ feature disabled — no mask exists for any ring */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}
522 
/* Stub: near-full IRQ feature disabled — msi2 params are never set */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}
530 
/* Stub: near-full IRQ feature disabled — no msi2 setup needed */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}
537 
/* Stub: near-full IRQ feature disabled — thresholds are not programmed */
static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}
544 
/* Stub: near-full IRQ feature disabled — nothing cached on the srng */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
550 #endif
551 
/**
 * dp_srng_calculate_msi_group() - find the interrupt (ext_group) numbers for
 *			a ring's regular and near-full MSIs
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number (may be remapped internally for mask lookup)
 * @reg_msi_grp_num: out param; ext_group index for the regular interrupt
 * @nf_irq_support: true if the caller supports near-full interrupts
 * @nf_msi_grp_num: out param; ext_group index for the near-full interrupt,
 *                  written only when nf irq is both supported and enabled
 *
 * Return: QDF_STATUS_SUCCESS on success, -QDF_STATUS_E_NOENT for ring types
 * that take no host interrupt (SW-to-HW, unused, or CE-handled rings)
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
			ring_num = 0;
		}  else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case REO2PPE:
		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
	break;

	case PPE2TCL:
		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	/* All fall-through paths above have set grp_mask */
	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
681 
/**
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* Integrated (single-pdev) targets: no dedicated DP MSIs */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
694 #else
695 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
696 {
697 	int msi_data_count;
698 	int msi_data_start;
699 	int msi_irq_start;
700 	int ret;
701 
702 	if (interrupt_mode == DP_INTR_INTEGRATED) {
703 		return 0;
704 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
705 		   DP_INTR_POLL) {
706 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
707 						  &msi_data_count,
708 						  &msi_data_start,
709 						  &msi_irq_start);
710 		if (ret) {
711 			qdf_err("Unable to get DP MSI assignment %d",
712 				interrupt_mode);
713 			return -EINVAL;
714 		}
715 		return msi_data_count;
716 	}
717 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
718 	return -EINVAL;
719 }
720 #endif
721 
722 #if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
723 static void
724 dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
725 			   int ring_num)
726 {
727 	if (wlan_ipa_is_vlan_enabled()) {
728 		if ((ring_type == REO_DST) &&
729 				(ring_num == IPA_ALT_REO_DEST_RING_IDX)) {
730 			ring_params->msi_addr = 0;
731 			ring_params->msi_data = 0;
732 			ring_params->flags &= ~HAL_SRNG_MSI_INTR;
733 		}
734 	}
735 }
736 #else
/* Stub: IPA VLAN offload not compiled in — nothing to adjust */
static inline void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
}
742 #endif
743 
/**
 * dp_srng_msi_setup() - program the MSI address/data for an SRNG
 * @soc: Datapath SoC handle
 * @srng: dp_srng being configured
 * @ring_params: hal srng params to fill in
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 *
 * Resolves the ring's interrupt ext_group, fills in the regular MSI
 * address/data, optionally registers ppeds interrupts, and finally sets up
 * the near-full MSI2 when the ring supports it.  Silently returns when the
 * platform has no MSI assignment for "DP".
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
			      struct hal_srng_params *ring_params,
			      int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;
	int vector;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	if (ret < 0) {
		/* Ring takes no host interrupt at all: clear both MSIs */
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	if (reg_msi_grp_num < 0) {
		/* No regular-interrupt group, but near-full msi2 may still
		 * apply — fall through to configure_msi2.
		 */
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* 64-bit MSI address delivered as two 32-bit halves */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);

	/*
	 * During umac reset ppeds interrupts free is not called.
	 * Avoid registering interrupts again.
	 *
	 */
	if (dp_check_umac_reset_in_progress(soc))
		goto configure_msi2;

	if (soc->arch_ops.dp_register_ppeds_interrupts)
		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
							       vector,
							       ring_type,
							       ring_num))
			return;

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
838 
839 /**
840  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
841  * update threshold value from wlan_cfg_ctx
842  * @soc: device handle
843  * @ring_params: per ring specific parameters
844  * @ring_type: Ring type
845  * @ring_num: Ring number for a given ring type
846  * @num_entries: number of entries to fill
847  *
848  * Fill the ring params with the pointer update threshold
849  * configuration parameters available in wlan_cfg_ctx
850  *
851  * Return: None
852  */
853 static void
854 dp_srng_configure_pointer_update_thresholds(
855 				struct dp_soc *soc,
856 				struct hal_srng_params *ring_params,
857 				int ring_type, int ring_num,
858 				int num_entries)
859 {
860 	if (ring_type == REO_DST) {
861 		ring_params->pointer_timer_threshold =
862 			wlan_cfg_get_pointer_timer_threshold_rx(
863 						soc->wlan_cfg_ctx);
864 		ring_params->pointer_num_threshold =
865 			wlan_cfg_get_pointer_num_threshold_rx(
866 						soc->wlan_cfg_ctx);
867 	}
868 }
869 
870 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
871 /**
872  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
873  * threshold values from the wlan_srng_cfg table for each ring type
874  * @soc: device handle
875  * @ring_params: per ring specific parameters
876  * @ring_type: Ring type
877  * @ring_num: Ring number for a given ring type
878  * @num_entries: number of entries to fill
879  *
880  * Fill the ring params with the interrupt threshold
881  * configuration parameters available in the per ring type wlan_srng_cfg
882  * table.
883  *
884  * Return: None
885  */
886 static void
887 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
888 				       struct hal_srng_params *ring_params,
889 				       int ring_type, int ring_num,
890 				       int num_entries)
891 {
892 	uint8_t wbm2_sw_rx_rel_ring_id;
893 
894 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
895 
896 	if (ring_type == REO_DST) {
897 		ring_params->intr_timer_thres_us =
898 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
899 		ring_params->intr_batch_cntr_thres_entries =
900 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
901 	} else if (ring_type == WBM2SW_RELEASE &&
902 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
903 		ring_params->intr_timer_thres_us =
904 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
905 		ring_params->intr_batch_cntr_thres_entries =
906 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
907 	} else {
908 		ring_params->intr_timer_thres_us =
909 				soc->wlan_srng_cfg[ring_type].timer_threshold;
910 		ring_params->intr_batch_cntr_thres_entries =
911 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
912 	}
913 	ring_params->low_threshold =
914 			soc->wlan_srng_cfg[ring_type].low_threshold;
915 	if (ring_params->low_threshold)
916 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
917 
918 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
919 }
920 #else
921 static void
922 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
923 				       struct hal_srng_params *ring_params,
924 				       int ring_type, int ring_num,
925 				       int num_entries)
926 {
927 	uint8_t wbm2_sw_rx_rel_ring_id;
928 	bool rx_refill_lt_disable;
929 
930 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
931 
932 	if (ring_type == REO_DST || ring_type == REO2PPE) {
933 		ring_params->intr_timer_thres_us =
934 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
935 		ring_params->intr_batch_cntr_thres_entries =
936 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
937 	} else if (ring_type == WBM2SW_RELEASE &&
938 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
939 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
940 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
941 		ring_params->intr_timer_thres_us =
942 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
943 		ring_params->intr_batch_cntr_thres_entries =
944 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
945 	} else if (ring_type == RXDMA_BUF) {
946 		rx_refill_lt_disable =
947 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
948 							(soc->wlan_cfg_ctx);
949 		ring_params->intr_timer_thres_us =
950 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
951 
952 		if (!rx_refill_lt_disable) {
953 			ring_params->low_threshold = num_entries >> 3;
954 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
955 			ring_params->intr_batch_cntr_thres_entries = 0;
956 		}
957 	} else {
958 		ring_params->intr_timer_thres_us =
959 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
960 		ring_params->intr_batch_cntr_thres_entries =
961 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
962 	}
963 
964 	/* These rings donot require interrupt to host. Make them zero */
965 	switch (ring_type) {
966 	case REO_REINJECT:
967 	case REO_CMD:
968 	case TCL_DATA:
969 	case TCL_CMD_CREDIT:
970 	case TCL_STATUS:
971 	case WBM_IDLE_LINK:
972 	case SW2WBM_RELEASE:
973 	case SW2RXDMA_NEW:
974 		ring_params->intr_timer_thres_us = 0;
975 		ring_params->intr_batch_cntr_thres_entries = 0;
976 		break;
977 	case PPE2TCL:
978 		ring_params->intr_timer_thres_us =
979 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
980 		ring_params->intr_batch_cntr_thres_entries =
981 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
982 		break;
983 	}
984 
985 	/* Enable low threshold interrupts for rx buffer rings (regular and
986 	 * monitor buffer rings.
987 	 * TODO: See if this is required for any other ring
988 	 */
989 	if ((ring_type == RXDMA_MONITOR_BUF) ||
990 	    (ring_type == RXDMA_MONITOR_STATUS ||
991 	    (ring_type == TX_MONITOR_BUF))) {
992 		/* TODO: Setting low threshold to 1/8th of ring size
993 		 * see if this needs to be configurable
994 		 */
995 		ring_params->low_threshold = num_entries >> 3;
996 		ring_params->intr_timer_thres_us =
997 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
998 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
999 		ring_params->intr_batch_cntr_thres_entries = 0;
1000 	}
1001 
1002 	/* During initialisation monitor rings are only filled with
1003 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1004 	 * a value less than that. Low threshold value is reconfigured again
1005 	 * to 1/8th of the ring size when monitor vap is created.
1006 	 */
1007 	if (ring_type == RXDMA_MONITOR_BUF)
1008 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1009 
	/* In case of PCI chipsets, we don't have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
1015 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1016 	    (soc->intr_mode == DP_INTR_MSI))
1017 		ring_params->intr_batch_cntr_thres_entries = 4;
1018 }
1019 #endif
1020 
1021 #ifdef DISABLE_MON_RING_MSI_CFG
1022 /**
1023  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
1024  * @soc: DP SoC context
 * @ring_type: srng type
1026  *
1027  * Return: True if msi cfg should be skipped for srng type else false
1028  */
1029 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1030 {
1031 	if (ring_type == RXDMA_MONITOR_STATUS)
1032 		return true;
1033 
1034 	return false;
1035 }
1036 #else
1037 #ifdef DP_CON_MON_MSI_ENABLED
1038 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1039 {
1040 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1041 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
1042 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
1043 			return true;
1044 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
1045 		return true;
1046 	}
1047 
1048 	return false;
1049 }
1050 #else
/* Neither DISABLE_MON_RING_MSI_CFG nor DP_CON_MON_MSI_ENABLED is set:
 * MSI configuration is never skipped for any ring type.
 */
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	return false;
}
1055 #endif /* DP_CON_MON_MSI_ENABLED */
1056 #endif /* DISABLE_MON_RING_MSI_CFG */
1057 
1058 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
1059 			    int ring_type, int ring_num, int mac_id,
1060 			    uint32_t idx)
1061 {
1062 	bool idle_check;
1063 
1064 	hal_soc_handle_t hal_soc = soc->hal_soc;
1065 	struct hal_srng_params ring_params;
1066 
1067 	if (srng->hal_srng) {
1068 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
1069 			    soc, ring_type, ring_num);
1070 		return QDF_STATUS_SUCCESS;
1071 	}
1072 
1073 	/* memset the srng ring to zero */
1074 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1075 
1076 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1077 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1078 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1079 
1080 	ring_params.num_entries = srng->num_entries;
1081 
1082 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1083 		ring_type, ring_num,
1084 		(void *)ring_params.ring_base_vaddr,
1085 		(void *)ring_params.ring_base_paddr,
1086 		ring_params.num_entries);
1087 
1088 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
1089 		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
1090 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1091 				 ring_type, ring_num);
1092 	} else {
1093 		ring_params.msi_data = 0;
1094 		ring_params.msi_addr = 0;
1095 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
1096 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1097 				 ring_type, ring_num);
1098 	}
1099 
1100 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1101 					       ring_type, ring_num,
1102 					       srng->num_entries);
1103 
1104 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
1105 	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
1106 						    ring_type, ring_num,
1107 						    srng->num_entries);
1108 
1109 	if (srng->cached)
1110 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1111 
1112 	idle_check = dp_check_umac_reset_in_progress(soc);
1113 
1114 	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
1115 					    mac_id, &ring_params, idle_check,
1116 					    idx);
1117 
1118 	if (!srng->hal_srng) {
1119 		dp_srng_free(soc, srng);
1120 		return QDF_STATUS_E_FAILURE;
1121 	}
1122 
1123 	return QDF_STATUS_SUCCESS;
1124 }
1125 
1126 qdf_export_symbol(dp_srng_init_idx);
1127 
1128 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
1129 				     struct dp_intr *int_ctx,
1130 				     int mac_for_pdev,
1131 				     int total_budget)
1132 {
1133 	uint32_t target_type;
1134 
1135 	target_type = hal_get_target_type(soc->hal_soc);
1136 	if (target_type == TARGET_TYPE_QCN9160)
1137 		return dp_monitor_process(soc, int_ctx,
1138 					  mac_for_pdev, total_budget);
1139 	else
1140 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
1141 					    total_budget);
1142 }
1143 
1144 /**
1145  * dp_process_lmac_rings() - Process LMAC rings
1146  * @int_ctx: interrupt context
1147  * @total_budget: budget of work which can be done
1148  *
1149  * Return: work done
1150  */
1151 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1152 {
1153 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1154 	struct dp_soc *soc = int_ctx->soc;
1155 	uint32_t remaining_quota = total_budget;
1156 	struct dp_pdev *pdev = NULL;
1157 	uint32_t work_done  = 0;
1158 	int budget = total_budget;
1159 	int ring = 0;
1160 	bool rx_refill_lt_disable;
1161 
1162 	rx_refill_lt_disable =
1163 		wlan_cfg_get_dp_soc_rxdma_refill_lt_disable(soc->wlan_cfg_ctx);
1164 
1165 	/* Process LMAC interrupts */
1166 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1167 		int mac_for_pdev = ring;
1168 
1169 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1170 		if (!pdev)
1171 			continue;
1172 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1173 			work_done = dp_monitor_process(soc, int_ctx,
1174 						       mac_for_pdev,
1175 						       remaining_quota);
1176 			if (work_done)
1177 				intr_stats->num_rx_mon_ring_masks++;
1178 			budget -= work_done;
1179 			if (budget <= 0)
1180 				goto budget_done;
1181 			remaining_quota = budget;
1182 		}
1183 
1184 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
1185 			work_done = dp_tx_mon_process(soc, int_ctx,
1186 						      mac_for_pdev,
1187 						      remaining_quota);
1188 			if (work_done)
1189 				intr_stats->num_tx_mon_ring_masks++;
1190 			budget -= work_done;
1191 			if (budget <= 0)
1192 				goto budget_done;
1193 			remaining_quota = budget;
1194 		}
1195 
1196 		if (int_ctx->rxdma2host_ring_mask &
1197 				(1 << mac_for_pdev)) {
1198 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
1199 							      mac_for_pdev,
1200 							      remaining_quota);
1201 			if (work_done)
1202 				intr_stats->num_rxdma2host_ring_masks++;
1203 			budget -=  work_done;
1204 			if (budget <= 0)
1205 				goto budget_done;
1206 			remaining_quota = budget;
1207 		}
1208 
1209 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
1210 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1211 			union dp_rx_desc_list_elem_t *tail = NULL;
1212 			struct dp_srng *rx_refill_buf_ring;
1213 			struct rx_desc_pool *rx_desc_pool;
1214 
1215 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
1216 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1217 				rx_refill_buf_ring =
1218 					&soc->rx_refill_buf_ring[mac_for_pdev];
1219 			else
1220 				rx_refill_buf_ring =
1221 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1222 
1223 			intr_stats->num_host2rxdma_ring_masks++;
1224 
1225 			if (!rx_refill_lt_disable)
1226 				dp_rx_buffers_lt_replenish_simple(soc,
1227 							  mac_for_pdev,
1228 							  rx_refill_buf_ring,
1229 							  rx_desc_pool,
1230 							  0,
1231 							  &desc_list,
1232 							  &tail);
1233 		}
1234 	}
1235 
1236 	if (int_ctx->host2rxdma_mon_ring_mask)
1237 		dp_rx_mon_buf_refill(int_ctx);
1238 
1239 	if (int_ctx->host2txmon_ring_mask)
1240 		dp_tx_mon_buf_refill(int_ctx);
1241 
1242 budget_done:
1243 	return total_budget - budget;
1244 }
1245 
1246 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1247 /**
1248  * dp_service_near_full_srngs() - Bottom half handler to process the near
1249  *				full IRQ on a SRNG
1250  * @dp_ctx: Datapath SoC handle
1251  * @dp_budget: Number of SRNGs which can be processed in a single attempt
1252  *		without rescheduling
1253  * @cpu: cpu id
1254  *
1255  * Return: remaining budget/quota for the soc device
1256  */
1257 static
1258 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1259 {
1260 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1261 	struct dp_soc *soc = int_ctx->soc;
1262 
1263 	/*
1264 	 * dp_service_near_full_srngs arch ops should be initialized always
1265 	 * if the NEAR FULL IRQ feature is enabled.
1266 	 */
1267 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
1268 							dp_budget);
1269 }
1270 #endif
1271 
1272 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1273 
/*
 * dp_service_srngs() - Top-level bottom half for DP ring interrupts.
 * Reaps, in priority order: tx completion rings, REO exception ring,
 * Rx WBM release ring, REO destination (rx) rings, REO status ring and
 * the LMAC rings, stopping once @dp_budget is consumed.
 * Returns the amount of budget consumed (dp_budget minus what is left).
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;

	/* Mark this context as running on @cpu; cleared on exit and
	 * consulted via soc->service_rings_running elsewhere.
	 */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		/* Skip completion rings whose WBM ring bit is not in tx_mask */
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring work is not charged against the budget */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are reaped here only when the monitor vdev timer is
	 * not already servicing them.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	/* Let FW know host is done servicing, if such a callback is set */
	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
1406 
1407 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1408 
1409 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1410 {
1411 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1412 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1413 	struct dp_soc *soc = int_ctx->soc;
1414 	uint32_t remaining_quota = dp_budget;
1415 	uint32_t work_done  = 0;
1416 	int budget = dp_budget;
1417 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1418 
1419 	if (reo_status_mask) {
1420 		if (dp_reo_status_ring_handler(int_ctx, soc))
1421 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1422 	}
1423 
1424 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
1425 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1426 		if (work_done) {
1427 			budget -=  work_done;
1428 			if (budget <= 0)
1429 				goto budget_done;
1430 			remaining_quota = budget;
1431 		}
1432 	}
1433 
1434 	qdf_lro_flush(int_ctx->lro_ctx);
1435 	intr_stats->num_masks++;
1436 
1437 budget_done:
1438 	return dp_budget - budget;
1439 }
1440 
1441 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1442 
1443 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1444 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
1445 					struct dp_intr *intr_ctx)
1446 {
1447 	if (intr_ctx->rx_mon_ring_mask)
1448 		return true;
1449 
1450 	return false;
1451 }
1452 #else
/* DP event history disabled: monitor mask tracking is never needed */
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
1458 #endif
1459 
1460 QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1461 {
1462 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1463 	int i;
1464 	int lmac_id = 0;
1465 
1466 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1467 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
1468 	soc->intr_mode = DP_INTR_POLL;
1469 
1470 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1471 		soc->intr_ctx[i].dp_intr_id = i;
1472 		soc->intr_ctx[i].tx_ring_mask =
1473 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1474 		soc->intr_ctx[i].rx_ring_mask =
1475 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1476 		soc->intr_ctx[i].rx_mon_ring_mask =
1477 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1478 		soc->intr_ctx[i].rx_err_ring_mask =
1479 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1480 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1481 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1482 		soc->intr_ctx[i].reo_status_ring_mask =
1483 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1484 		soc->intr_ctx[i].rxdma2host_ring_mask =
1485 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1486 		soc->intr_ctx[i].soc = soc;
1487 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1488 
1489 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
1490 			hif_event_history_init(soc->hif_handle, i);
1491 			soc->mon_intr_id_lmac_map[lmac_id] = i;
1492 			lmac_id++;
1493 		}
1494 	}
1495 
1496 	qdf_timer_init(soc->osdev, &soc->int_timer,
1497 		       dp_interrupt_timer, (void *)soc,
1498 		       QDF_TIMER_TYPE_WAKE_APPS);
1499 
1500 	return QDF_STATUS_SUCCESS;
1501 }
1502 
1503 void dp_soc_set_interrupt_mode(struct dp_soc *soc)
1504 {
1505 	uint32_t msi_base_data, msi_vector_start;
1506 	int msi_vector_count, ret;
1507 
1508 	soc->intr_mode = DP_INTR_INTEGRATED;
1509 
1510 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1511 	    (dp_is_monitor_mode_using_poll(soc) &&
1512 	     soc->cdp_soc.ol_ops->get_con_mode &&
1513 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
1514 		soc->intr_mode = DP_INTR_POLL;
1515 	} else {
1516 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1517 						  &msi_vector_count,
1518 						  &msi_base_data,
1519 						  &msi_vector_start);
1520 		if (ret)
1521 			return;
1522 
1523 		soc->intr_mode = DP_INTR_MSI;
1524 	}
1525 }
1526 
1527 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
1528 /**
1529  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
1530  * Calculate interrupt map for legacy interrupts
1531  * @soc: DP soc handle
1532  * @intr_ctx_num: Interrupt context number
1533  * @irq_id_map: IRQ map
1534  * @num_irq_r: Number of interrupts assigned for this context
1535  *
1536  * Return: void
1537  */
1538 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
1539 							    int intr_ctx_num,
1540 							    int *irq_id_map,
1541 							    int *num_irq_r)
1542 {
1543 	int j;
1544 	int num_irq = 0;
1545 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1546 					soc->wlan_cfg_ctx, intr_ctx_num);
1547 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1548 					soc->wlan_cfg_ctx, intr_ctx_num);
1549 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1550 					soc->wlan_cfg_ctx, intr_ctx_num);
1551 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1552 					soc->wlan_cfg_ctx, intr_ctx_num);
1553 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1554 					soc->wlan_cfg_ctx, intr_ctx_num);
1555 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1556 					soc->wlan_cfg_ctx, intr_ctx_num);
1557 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1558 					soc->wlan_cfg_ctx, intr_ctx_num);
1559 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1560 					soc->wlan_cfg_ctx, intr_ctx_num);
1561 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1562 					soc->wlan_cfg_ctx, intr_ctx_num);
1563 	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
1564 					soc->wlan_cfg_ctx, intr_ctx_num);
1565 	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
1566 					soc->wlan_cfg_ctx, intr_ctx_num);
1567 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
1568 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1569 		if (tx_mask & (1 << j))
1570 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
1571 		if (rx_mask & (1 << j))
1572 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
1573 		if (rx_mon_mask & (1 << j))
1574 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
1575 		if (rx_err_ring_mask & (1 << j))
1576 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
1577 		if (rx_wbm_rel_ring_mask & (1 << j))
1578 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
1579 		if (reo_status_ring_mask & (1 << j))
1580 			irq_id_map[num_irq++] = (reo_status - j);
1581 		if (rxdma2host_ring_mask & (1 << j))
1582 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
1583 		if (host2rxdma_ring_mask & (1 << j))
1584 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
1585 		if (host2rxdma_mon_ring_mask & (1 << j))
1586 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
1587 		if (host2txmon_ring_mask & (1 << j))
1588 			irq_id_map[num_irq++] = sw2txmon_src_ring;
1589 		if (txmon2host_mon_ring_mask & (1 << j))
1590 			irq_id_map[num_irq++] = (txmon2sw_p0_dest0 - j);
1591 	}
1592 	*num_irq_r = num_irq;
1593 }
1594 #else
/* Legacy (INTx) interrupts not supported in this build: nothing to map */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
1601 #endif
1602 
/**
 * dp_soc_interrupt_map_calculate_integrated() - Calculate interrupt map
 * for integrated interrupt mode
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Also switches the soc interrupt mode to DP_INTR_INTEGRATED.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	/* For each ring instance j enabled in a mask, the IRQ id is derived
	 * as (base - j); presumably the per-ring IRQ ids are laid out in
	 * descending order — TODO confirm against the target interrupt map.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Rx monitor needs three IRQ lines per enabled instance */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = host2tx_monitor_ring1;

		if (txmon2host_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(txmon2host_monitor_destination_mac1 - j);
		}

	}
	*num_irq_r = num_irq;
}
1690 
1691 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1692 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1693 		int msi_vector_count, int msi_vector_start)
1694 {
1695 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1696 					soc->wlan_cfg_ctx, intr_ctx_num);
1697 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1698 					soc->wlan_cfg_ctx, intr_ctx_num);
1699 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1700 					soc->wlan_cfg_ctx, intr_ctx_num);
1701 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
1702 					soc->wlan_cfg_ctx, intr_ctx_num);
1703 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1704 					soc->wlan_cfg_ctx, intr_ctx_num);
1705 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1706 					soc->wlan_cfg_ctx, intr_ctx_num);
1707 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1708 					soc->wlan_cfg_ctx, intr_ctx_num);
1709 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1710 					soc->wlan_cfg_ctx, intr_ctx_num);
1711 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1712 					soc->wlan_cfg_ctx, intr_ctx_num);
1713 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1714 					soc->wlan_cfg_ctx, intr_ctx_num);
1715 	int rx_near_full_grp_1_mask =
1716 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
1717 						     intr_ctx_num);
1718 	int rx_near_full_grp_2_mask =
1719 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
1720 						     intr_ctx_num);
1721 	int tx_ring_near_full_mask =
1722 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
1723 						    intr_ctx_num);
1724 
1725 	int host2txmon_ring_mask =
1726 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
1727 						  intr_ctx_num);
1728 	unsigned int vector =
1729 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1730 	int num_irq = 0;
1731 
1732 	soc->intr_mode = DP_INTR_MSI;
1733 
1734 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
1735 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
1736 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
1737 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
1738 	    tx_ring_near_full_mask | host2txmon_ring_mask)
1739 		irq_id_map[num_irq++] =
1740 			pld_get_msi_irq(soc->osdev->dev, vector);
1741 
1742 	*num_irq_r = num_irq;
1743 }
1744 
1745 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1746 				    int *irq_id_map, int *num_irq)
1747 {
1748 	int msi_vector_count, ret;
1749 	uint32_t msi_base_data, msi_vector_start;
1750 
1751 	if (pld_get_enable_intx(soc->osdev->dev)) {
1752 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
1753 				intr_ctx_num, irq_id_map, num_irq);
1754 	}
1755 
1756 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1757 					  &msi_vector_count,
1758 					  &msi_base_data,
1759 					  &msi_vector_start);
1760 	if (ret)
1761 		return dp_soc_interrupt_map_calculate_integrated(soc,
1762 				intr_ctx_num, irq_id_map, num_irq);
1763 
1764 	else
1765 		dp_soc_interrupt_map_calculate_msi(soc,
1766 				intr_ctx_num, irq_id_map, num_irq,
1767 				msi_vector_count, msi_vector_start);
1768 }
1769 
1770 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1771 /**
1772  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
1773  * @soc: DP soc handle
1774  * @num_irq: IRQ number
1775  * @irq_id_map: IRQ map
1776  * @intr_id: interrupt context ID
1777  *
1778  * Return: 0 for success. nonzero for failure.
1779  */
1780 static inline int
1781 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
1782 				  int irq_id_map[], int intr_id)
1783 {
1784 	return hif_register_ext_group(soc->hif_handle,
1785 				      num_irq, irq_id_map,
1786 				      dp_service_near_full_srngs,
1787 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
1788 				      HIF_EXEC_NAPI_TYPE,
1789 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1790 }
1791 #else
/* Near-full IRQ feature disabled: nothing to attach */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
1798 #endif
1799 
1800 #ifdef DP_CON_MON_MSI_SKIP_SET
1801 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
1802 {
1803 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
1804 			QDF_GLOBAL_MONITOR_MODE);
1805 }
1806 #else
/* Con-mon MSI skip not compiled in: always program the rx_mon ring mask */
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	return false;
}
1811 #endif
1812 
1813 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
1814 {
1815 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1816 	int i;
1817 
1818 	if (soc->intr_mode == DP_INTR_POLL) {
1819 		qdf_timer_free(&soc->int_timer);
1820 	} else {
1821 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
1822 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1823 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
1824 	}
1825 
1826 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1827 		soc->intr_ctx[i].tx_ring_mask = 0;
1828 		soc->intr_ctx[i].rx_ring_mask = 0;
1829 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1830 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1831 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1832 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1833 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1834 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1835 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1836 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
1837 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
1838 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
1839 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
1840 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
1841 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
1842 
1843 		hif_event_history_deinit(soc->hif_handle, i);
1844 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1845 	}
1846 
1847 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1848 		    sizeof(soc->mon_intr_id_lmac_map),
1849 		    DP_MON_INVALID_LMAC_ID);
1850 }
1851 
1852 QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
1853 {
1854 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1855 
1856 	int i = 0;
1857 	int num_irq = 0;
1858 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
1859 	int lmac_id = 0;
1860 	int napi_scale;
1861 
1862 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1863 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
1864 
1865 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1866 		int ret = 0;
1867 
1868 		/* Map of IRQ ids registered with one interrupt context */
1869 		int irq_id_map[HIF_MAX_GRP_IRQ];
1870 
1871 		int tx_mask =
1872 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1873 		int rx_mask =
1874 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1875 		int rx_mon_mask =
1876 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1877 		int tx_mon_ring_mask =
1878 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1879 		int rx_err_ring_mask =
1880 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1881 		int rx_wbm_rel_ring_mask =
1882 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1883 		int reo_status_ring_mask =
1884 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1885 		int rxdma2host_ring_mask =
1886 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1887 		int host2rxdma_ring_mask =
1888 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1889 		int host2rxdma_mon_ring_mask =
1890 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1891 				soc->wlan_cfg_ctx, i);
1892 		int rx_near_full_grp_1_mask =
1893 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
1894 							     i);
1895 		int rx_near_full_grp_2_mask =
1896 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
1897 							     i);
1898 		int tx_ring_near_full_mask =
1899 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
1900 							    i);
1901 		int host2txmon_ring_mask =
1902 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
1903 		int umac_reset_intr_mask =
1904 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
1905 
1906 		if (dp_skip_rx_mon_ring_mask_set(soc))
1907 			rx_mon_mask = 0;
1908 
1909 		soc->intr_ctx[i].dp_intr_id = i;
1910 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1911 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1912 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1913 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1914 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1915 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1916 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1917 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1918 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1919 			 host2rxdma_mon_ring_mask;
1920 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
1921 						rx_near_full_grp_1_mask;
1922 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
1923 						rx_near_full_grp_2_mask;
1924 		soc->intr_ctx[i].tx_ring_near_full_mask =
1925 						tx_ring_near_full_mask;
1926 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
1927 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
1928 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
1929 
1930 		soc->intr_ctx[i].soc = soc;
1931 
1932 		num_irq = 0;
1933 
1934 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1935 					       &num_irq);
1936 
1937 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
1938 		    tx_ring_near_full_mask) {
1939 			dp_soc_near_full_interrupt_attach(soc, num_irq,
1940 							  irq_id_map, i);
1941 		} else {
1942 			napi_scale = wlan_cfg_get_napi_scale_factor(
1943 							    soc->wlan_cfg_ctx);
1944 			if (!napi_scale)
1945 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
1946 
1947 			ret = hif_register_ext_group(soc->hif_handle,
1948 				num_irq, irq_id_map, dp_service_srngs,
1949 				&soc->intr_ctx[i], "dp_intr",
1950 				HIF_EXEC_NAPI_TYPE, napi_scale);
1951 		}
1952 
1953 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
1954 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
1955 
1956 		if (ret) {
1957 			dp_init_err("%pK: failed, ret = %d", soc, ret);
1958 			dp_soc_interrupt_detach(txrx_soc);
1959 			return QDF_STATUS_E_FAILURE;
1960 		}
1961 
1962 		hif_event_history_init(soc->hif_handle, i);
1963 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1964 
1965 		if (rx_err_ring_mask)
1966 			rx_err_ring_intr_ctxt_id = i;
1967 
1968 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
1969 			soc->mon_intr_id_lmac_map[lmac_id] = i;
1970 			lmac_id++;
1971 		}
1972 	}
1973 
1974 	hif_configure_ext_group_interrupts(soc->hif_handle);
1975 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
1976 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
1977 						  rx_err_ring_intr_ctxt_id, 0);
1978 
1979 	return QDF_STATUS_SUCCESS;
1980 }
1981 
/*
 * Per-client traffic averages used to dimension the common HW link
 * descriptor pool in dp_hw_link_desc_pool_banks_alloc().
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
1987 
1988 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
1989 {
1990 	struct qdf_mem_multi_page_t *pages;
1991 
1992 	if (mac_id != WLAN_INVALID_PDEV_ID) {
1993 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1994 	} else {
1995 		pages = &soc->link_desc_pages;
1996 	}
1997 
1998 	if (!pages) {
1999 		dp_err("can not get link desc pages");
2000 		QDF_ASSERT(0);
2001 		return;
2002 	}
2003 
2004 	if (pages->dma_pages) {
2005 		wlan_minidump_remove((void *)
2006 				     pages->dma_pages->page_v_addr_start,
2007 				     pages->num_pages * pages->page_size,
2008 				     soc->ctrl_psoc,
2009 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2010 				     "hw_link_desc_bank");
2011 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
2012 					     pages, 0, false);
2013 	}
2014 }
2015 
2016 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
2017 
/**
 * dp_hw_link_desc_pool_banks_alloc() - Allocate the multi-page memory
 *	banks backing the HW link descriptor pool
 * @soc: DP SOC handle
 * @mac_id: pdev MAC id for the monitor-mode bank, or WLAN_INVALID_PDEV_ID
 *	for the common SOC pool
 *
 * Return: QDF_STATUS_SUCCESS on success (or when banks already exist),
 *	QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	/* NOTE(review): minidump_str is populated below but never consumed
	 * in this function — the minidump calls use string literals.
	 */
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		/* Monitor path: size the bank from the monitor desc SRNG */
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		/* Common pool: derive entry count from per-client averages */
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		/* NOTE(review): divisor 6 is a hard-coded per-link-desc rx
		 * MSDU count; confirm why num_msdus_per_link_desc is not
		 * used here as in the tx computation above.
		 */
		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);
	total_mem_size =  *total_link_descs * link_desc_size;
	/* Extra room so the pool can be aligned to HW requirements */
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	/* Register the bank with minidump for post-mortem capture */
	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}
2118 
2119 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2120 {
2121 	uint32_t i;
2122 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2123 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2124 	qdf_dma_addr_t paddr;
2125 
2126 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2127 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2128 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2129 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2130 			if (vaddr) {
2131 				qdf_mem_free_consistent(soc->osdev,
2132 							soc->osdev->dev,
2133 							size,
2134 							vaddr,
2135 							paddr,
2136 							0);
2137 				vaddr = NULL;
2138 			}
2139 		}
2140 	} else {
2141 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2142 				     soc->wbm_idle_link_ring.alloc_size,
2143 				     soc->ctrl_psoc,
2144 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2145 				     "wbm_idle_link_ring");
2146 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2147 	}
2148 }
2149 
/**
 * dp_hw_link_desc_ring_alloc() - Allocate memory backing the WBM idle
 *	link descriptor list
 * @soc: DP SOC handle
 *
 * If the whole list fits into a single allocation a WBM_IDLE_LINK SRNG is
 * used; otherwise the list is split across DMA scatter buffers that are
 * later handed to HW by dp_link_desc_ring_replenish().
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;

	if (total_mem_size <= max_alloc_size) {
		/* Ring mode: one contiguous SRNG holds the idle list */
		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
			dp_init_err("%pK: Link desc idle ring setup failed",
				    soc);
			goto fail;
		}

		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				  soc->wbm_idle_link_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				  "wbm_idle_link_ring");
	} else {
		/* Scatter mode: split the list across DMA scatter buffers */
		uint32_t num_scatter_bufs;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		/* NOTE(review): return value is discarded here — confirm
		 * this call is needed for a side effect, or remove it.
		 */
		hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
					soc->hal_soc, total_mem_size,
					soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			buf_size = soc->wbm_idle_scatter_buf_size;
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 buf_size,
							 baseaddr);

			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}
		soc->num_scatter_bufs = num_scatter_bufs;
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Unwind any scatter buffers allocated before the failure */
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

		if (vaddr) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						soc->wbm_idle_scatter_buf_size,
						vaddr,
						paddr, 0);
			vaddr = NULL;
		}
	}
	return QDF_STATUS_E_NOMEM;
}
2233 
2234 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
2235 
2236 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2237 {
2238 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2239 
2240 	if (dp_srng->base_vaddr_unaligned) {
2241 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2242 			return QDF_STATUS_E_FAILURE;
2243 	}
2244 	return QDF_STATUS_SUCCESS;
2245 }
2246 
/* Deinitialize the WBM idle link SRNG; counterpart of
 * dp_hw_link_desc_ring_init().
 */
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
2251 
/**
 * dp_link_desc_ring_replenish() - Publish the link descriptor pool to HW
 * @soc: DP SOC handle
 * @mac_id: pdev MAC id for the monitor pool, or WLAN_INVALID_PDEV_ID for
 *	the common SOC pool
 *
 * Zeroes the descriptor pages and then hands the DMA address + cookie of
 * every link descriptor to HW, either through the idle link SRNG (ring
 * mode) or by filling the idle-list scatter buffers (scatter mode).
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	uint32_t desc_id = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	/* Select the page bank, descriptor count and destination SRNG */
	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	/* Start from a clean slate before handing memory to HW */
	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		/* Ring mode: write one ring entry per link descriptor */
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		/* NOTE(review): this overrides the monitor page bank chosen
		 * above when mac_id is valid, while dma_pages still points
		 * at the monitor pages — confirm this is intended.
		 */
		pages = &soc->link_desc_pages;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			/* desc_id wraps per page; it is folded into the
			 * cookie together with page_idx
			 */
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size),
					       soc->idle_link_bm_id);
			count++;
			desc_id++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		pages = &soc->link_desc_pages;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size),
					       soc->idle_link_bm_id);
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* Current buffer exhausted — move on to
				 * the next scatter buffer
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
			desc_id++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
}
2383 
2384 qdf_export_symbol(dp_link_desc_ring_replenish);
2385 
2386 #ifdef IPA_OFFLOAD
/* Number of REO dest rings reserved for IPA rx offload: one by default,
 * two when the vlan pipe is also offloaded.
 */
#define USE_1_IPA_RX_REO_RING 1
#define USE_2_IPA_RX_REO_RINGS 2
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation builds use minimal REO dest ring sizes */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2397 
2398 #ifdef IPA_WDI3_TX_TWO_PIPES
2399 #ifdef DP_MEMORY_OPT
/* Init the IPA alternate TX ring pair (DP_MEMORY_OPT build) */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Deinit the IPA alternate TX ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Allocate memory for the IPA alternate TX ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Free memory of the IPA alternate TX ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
2419 
2420 #else /* !DP_MEMORY_OPT */
/* DP_MEMORY_OPT disabled: the IPA alternate TX ring pair is not managed
 * here, so these hooks are no-op stubs that report success.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
2438 #endif /* DP_MEMORY_OPT */
2439 
/* Program HAL TX data-ring state for the IPA alternate TCL data ring */
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
2445 
2446 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single IPA TX pipe build: all alternate-ring hooks are no-op stubs */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
2468 
2469 #endif /* IPA_WDI3_TX_TWO_PIPES */
2470 
2471 #else
2472 
/* Non-IPA build: default REO dest ring size, and all IPA alternate TX
 * ring hooks are no-op stubs.
 */
#define REO_DST_RING_SIZE_QCA6290 1024

static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
2496 
2497 #endif /* IPA_OFFLOAD */
2498 
2499 /**
2500  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2501  * @soc: Datapath soc handler
2502  *
2503  * This api resets the default cpu ring map
2504  */
2505 void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2506 {
2507 	uint8_t i;
2508 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2509 
2510 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2511 		switch (nss_config) {
2512 		case dp_nss_cfg_first_radio:
2513 			/*
2514 			 * Setting Tx ring map for one nss offloaded radio
2515 			 */
2516 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2517 			break;
2518 
2519 		case dp_nss_cfg_second_radio:
2520 			/*
2521 			 * Setting Tx ring for two nss offloaded radios
2522 			 */
2523 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2524 			break;
2525 
2526 		case dp_nss_cfg_dbdc:
2527 			/*
2528 			 * Setting Tx ring map for 2 nss offloaded radios
2529 			 */
2530 			soc->tx_ring_map[i] =
2531 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2532 			break;
2533 
2534 		case dp_nss_cfg_dbtc:
2535 			/*
2536 			 * Setting Tx ring map for 3 nss offloaded radios
2537 			 */
2538 			soc->tx_ring_map[i] =
2539 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2540 			break;
2541 
2542 		default:
2543 			dp_err("tx_ring_map failed due to invalid nss cfg");
2544 			break;
2545 		}
2546 	}
2547 }
2548 
2549 /**
2550  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2551  *					  unused WMAC hw rings
2552  * @soc: DP Soc handle
2553  * @mac_num: wmac num
2554  *
2555  * Return: Return void
2556  */
2557 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2558 						int mac_num)
2559 {
2560 	uint8_t *grp_mask = NULL;
2561 	int group_number;
2562 
2563 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2564 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2565 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2566 					  group_number, 0x0);
2567 
2568 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2569 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2570 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2571 				      group_number, 0x0);
2572 
2573 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2574 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2575 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2576 					  group_number, 0x0);
2577 
2578 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2579 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2580 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2581 					      group_number, 0x0);
2582 }
2583 
2584 #ifdef IPA_OFFLOAD
2585 #ifdef IPA_WDI3_VLAN_SUPPORT
2586 /**
2587  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
2588  *                                     ring for vlan tagged traffic
2589  * @soc: DP Soc handle
2590  *
2591  * Return: Return void
2592  */
2593 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
2594 {
2595 	uint8_t *grp_mask = NULL;
2596 	int group_number, mask;
2597 
2598 	if (!wlan_ipa_is_vlan_enabled())
2599 		return;
2600 
2601 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2602 
2603 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
2604 	if (group_number < 0) {
2605 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2606 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
2607 		return;
2608 	}
2609 
2610 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2611 
2612 	/* reset the interrupt mask for offloaded ring */
2613 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
2614 
2615 	/*
2616 	 * set the interrupt mask to zero for rx offloaded radio.
2617 	 */
2618 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2619 }
#else
/* vlan offload disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_WDI3_VLAN_SUPPORT */
#else
/* IPA offload disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_OFFLOAD */
2630 
2631 /**
2632  * dp_soc_reset_intr_mask() - reset interrupt mask
2633  * @soc: DP Soc handle
2634  *
2635  * Return: Return void
2636  */
void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* Skip only rings that are neither offloaded nor masked */
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			/* NOTE(review): log tags this as REO_DST although
			 * the ring type handled here is RXDMA_BUF.
			 */
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
							  group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
						  group_number, mask);
	}

	/* Finally clear the rx error (REO exception) ring masks for
	 * offloaded rings; num_ring here is soc->num_reo_dest_rings.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
2769 
2770 #ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - Compute REO destination remap register values,
 *	excluding the REO ring(s) reserved for IPA rx offload
 * @soc: DP SOC handle
 * @remap0: output for REO remap IX0 (written on LI arch only)
 * @remap1: output for REO remap IX2
 * @remap2: output for REO remap IX3
 *
 * Return: true (remap values were produced)
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2)
{
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		/* BE arch always keeps two REO rings for IPA */
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_2_IPA_RX_REO_RINGS, remap1,
					      remap2);
		break;

	case CDP_ARCH_TYPE_LI:
		if (wlan_ipa_is_vlan_enabled()) {
			/* Extra ring reserved for vlan-tagged rx traffic */
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_2_IPA_RX_REO_RINGS, remap1,
					remap2);

		} else {
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_1_IPA_RX_REO_RING, remap1,
					remap2);
		}

		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
		break;
	default:
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);
	}

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
2813 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* True if @index is the IPA alternate TCL data ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True if @index is the IPA alternate TX completion ring */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single TX pipe: no alternate TX or TX completion ring exists */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
2835 
2836 /**
2837  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
2838  *
2839  * @tx_ring_num: Tx ring number
2840  * @tx_ipa_ring_sz: Return param only updated for IPA.
2841  * @soc_cfg_ctx: dp soc cfg context
2842  *
2843  * Return: None
2844  */
2845 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
2846 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2847 {
2848 	if (!soc_cfg_ctx->ipa_enabled)
2849 		return;
2850 
2851 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
2852 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
2853 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
2854 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
2855 }
2856 
2857 /**
2858  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
2859  *
2860  * @tx_comp_ring_num: Tx comp ring number
2861  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
2862  * @soc_cfg_ctx: dp soc cfg context
2863  *
2864  * Return: None
2865  */
2866 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
2867 					 int *tx_comp_ipa_ring_sz,
2868 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2869 {
2870 	if (!soc_cfg_ctx->ipa_enabled)
2871 		return;
2872 
2873 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
2874 		*tx_comp_ipa_ring_sz =
2875 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
2876 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
2877 		*tx_comp_ipa_ring_sz =
2878 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
2879 }
2880 #else
/**
 * dp_reo_ring_selection() - Translate the configured rx ring bitmask into
 *	the list of REO_REMAP_SW* destinations to spread rx traffic over
 * @value: bitmask of enabled REO2SW rings (bit n selects SW(n+1))
 * @ring: output array filled with REO_REMAP_SW* ids
 *
 * Return: number of entries written to @ring; 0 (after QDF_BUG) for an
 *	unhandled mask value
 */
static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
{
	uint8_t num = 0;

	switch (value) {
	/* should we have all the different possible ring configs */
	case 0xFF:
		num = 8;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		ring[4] = REO_REMAP_SW5;
		ring[5] = REO_REMAP_SW6;
		ring[6] = REO_REMAP_SW7;
		ring[7] = REO_REMAP_SW8;
		break;

	case 0x3F:
		num = 6;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		ring[4] = REO_REMAP_SW5;
		ring[5] = REO_REMAP_SW6;
		break;

	case 0xF:
		num = 4;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		break;
	case 0xE:
		num = 3;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW3;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xD:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW3;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xC:
		num = 2;
		ring[0] = REO_REMAP_SW3;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0xB:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xA:
		num = 2;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0x9:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0x8:
		num = 1;
		ring[0] = REO_REMAP_SW4;
		break;
	case 0x7:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		break;
	case 0x6:
		num = 2;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW3;
		break;
	case 0x5:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW3;
		break;
	case 0x4:
		num = 1;
		ring[0] = REO_REMAP_SW3;
		break;
	case 0x3:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		break;
	case 0x2:
		num = 1;
		ring[0] = REO_REMAP_SW2;
		break;
	case 0x1:
		num = 1;
		ring[0] = REO_REMAP_SW1;
		break;
	default:
		dp_err("unknown reo ring map 0x%x", value);
		QDF_BUG(0);
	}
	return num;
}
2992 
2993 bool dp_reo_remap_config(struct dp_soc *soc,
2994 			 uint32_t *remap0,
2995 			 uint32_t *remap1,
2996 			 uint32_t *remap2)
2997 {
2998 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2999 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3000 	uint8_t num;
3001 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
3002 	uint32_t value;
3003 
3004 	switch (offload_radio) {
3005 	case dp_nss_cfg_default:
3006 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
3007 		num = dp_reo_ring_selection(value, ring);
3008 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3009 					      num, remap1, remap2);
3010 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
3011 
3012 		break;
3013 	case dp_nss_cfg_first_radio:
3014 		value = reo_config & 0xE;
3015 		num = dp_reo_ring_selection(value, ring);
3016 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3017 					      num, remap1, remap2);
3018 
3019 		break;
3020 	case dp_nss_cfg_second_radio:
3021 		value = reo_config & 0xD;
3022 		num = dp_reo_ring_selection(value, ring);
3023 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3024 					      num, remap1, remap2);
3025 
3026 		break;
3027 	case dp_nss_cfg_dbdc:
3028 	case dp_nss_cfg_dbtc:
3029 		/* return false if both or all are offloaded to NSS */
3030 		return false;
3031 	}
3032 
3033 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3034 		 *remap1, *remap2, offload_radio);
3035 	return true;
3036 }
3037 
/**
 * dp_ipa_get_tx_ring_size() - stub when IPA offload is compiled out
 * @ring_num: TCL data ring number (unused)
 * @tx_ipa_ring_sz: ring size out-param, deliberately left untouched
 * @soc_cfg_ctx: dp soc cfg context (unused)
 *
 * Return: None
 */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
3042 
/**
 * dp_ipa_get_tx_comp_ring_size() - stub when IPA offload is compiled out
 * @tx_comp_ring_num: Tx comp ring number (unused)
 * @tx_comp_ipa_ring_sz: ring size out-param, deliberately left untouched
 * @soc_cfg_ctx: dp soc cfg context (unused)
 *
 * Return: None
 */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
3048 #endif /* IPA_OFFLOAD */
3049 
3050 /**
3051  * dp_reo_frag_dst_set() - configure reo register to set the
3052  *                        fragment destination ring
3053  * @soc: Datapath soc
3054  * @frag_dst_ring: output parameter to set fragment destination ring
3055  *
3056  * Based on offload_radio below fragment destination rings is selected
3057  * 0 - TCL
3058  * 1 - SW1
3059  * 2 - SW2
3060  * 3 - SW3
3061  * 4 - SW4
3062  * 5 - Release
3063  * 6 - FW
3064  * 7 - alternate select
3065  *
3066  * Return: void
3067  */
3068 void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3069 {
3070 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3071 
3072 	switch (offload_radio) {
3073 	case dp_nss_cfg_default:
3074 		*frag_dst_ring = REO_REMAP_TCL;
3075 		break;
3076 	case dp_nss_cfg_first_radio:
3077 		/*
3078 		 * This configuration is valid for single band radio which
3079 		 * is also NSS offload.
3080 		 */
3081 	case dp_nss_cfg_dbdc:
3082 	case dp_nss_cfg_dbtc:
3083 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3084 		break;
3085 	default:
3086 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
3087 		break;
3088 	}
3089 }
3090 
#ifdef WLAN_FEATURE_STATS_EXT
/**
 * dp_create_ext_stats_event() - create the rx_hw_stats qdf event
 * @soc: DP SoC handle
 *
 * Creates the event object used to wait for rx HW stats completion.
 */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
/* No-op when the extended stats feature is compiled out */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
3101 
3102 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
3103 {
3104 	int tcl_ring_num, wbm_ring_num;
3105 
3106 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
3107 						index,
3108 						&tcl_ring_num,
3109 						&wbm_ring_num);
3110 
3111 	if (tcl_ring_num == -1) {
3112 		dp_err("incorrect tcl ring num for index %u", index);
3113 		return;
3114 	}
3115 
3116 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
3117 			     soc->tcl_data_ring[index].alloc_size,
3118 			     soc->ctrl_psoc,
3119 			     WLAN_MD_DP_SRNG_TCL_DATA,
3120 			     "tcl_data_ring");
3121 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
3122 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
3123 		       tcl_ring_num);
3124 
3125 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
3126 		return;
3127 
3128 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
3129 			     soc->tx_comp_ring[index].alloc_size,
3130 			     soc->ctrl_psoc,
3131 			     WLAN_MD_DP_SRNG_TX_COMP,
3132 			     "tcl_comp_ring");
3133 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3134 		       wbm_ring_num);
3135 }
3136 
3137 /**
3138  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
3139  * ring pair
3140  * @soc: DP soc pointer
3141  * @index: index of soc->tcl_data or soc->tx_comp to initialize
3142  *
3143  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3144  */
3145 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3146 						uint8_t index)
3147 {
3148 	int tcl_ring_num, wbm_ring_num;
3149 	uint8_t bm_id;
3150 
3151 	if (index >= MAX_TCL_DATA_RINGS) {
3152 		dp_err("unexpected index!");
3153 		QDF_BUG(0);
3154 		goto fail1;
3155 	}
3156 
3157 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
3158 						index,
3159 						&tcl_ring_num,
3160 						&wbm_ring_num);
3161 
3162 	if (tcl_ring_num == -1) {
3163 		dp_err("incorrect tcl ring num for index %u", index);
3164 		goto fail1;
3165 	}
3166 
3167 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
3168 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
3169 			 tcl_ring_num, 0)) {
3170 		dp_err("dp_srng_init failed for tcl_data_ring");
3171 		goto fail1;
3172 	}
3173 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3174 			  soc->tcl_data_ring[index].alloc_size,
3175 			  soc->ctrl_psoc,
3176 			  WLAN_MD_DP_SRNG_TCL_DATA,
3177 			  "tcl_data_ring");
3178 
3179 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
3180 		goto set_rbm;
3181 
3182 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3183 			 wbm_ring_num, 0)) {
3184 		dp_err("dp_srng_init failed for tx_comp_ring");
3185 		goto fail1;
3186 	}
3187 
3188 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3189 			  soc->tx_comp_ring[index].alloc_size,
3190 			  soc->ctrl_psoc,
3191 			  WLAN_MD_DP_SRNG_TX_COMP,
3192 			  "tcl_comp_ring");
3193 set_rbm:
3194 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
3195 
3196 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
3197 
3198 	return QDF_STATUS_SUCCESS;
3199 
3200 fail1:
3201 	return QDF_STATUS_E_FAILURE;
3202 }
3203 
3204 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
3205 {
3206 	dp_debug("index %u", index);
3207 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
3208 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
3209 }
3210 
3211 /**
3212  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
3213  * ring pair for the given "index"
3214  * @soc: DP soc pointer
3215  * @index: index of soc->tcl_data or soc->tx_comp to initialize
3216  *
3217  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3218  */
3219 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3220 						 uint8_t index)
3221 {
3222 	int tx_ring_size;
3223 	int tx_comp_ring_size;
3224 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3225 	int cached = 0;
3226 
3227 	if (index >= MAX_TCL_DATA_RINGS) {
3228 		dp_err("unexpected index!");
3229 		QDF_BUG(0);
3230 		goto fail1;
3231 	}
3232 
3233 	dp_debug("index %u", index);
3234 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3235 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
3236 
3237 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3238 			  tx_ring_size, cached)) {
3239 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3240 		goto fail1;
3241 	}
3242 
3243 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3244 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
3245 	/* Enable cached TCL desc if NSS offload is disabled */
3246 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3247 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3248 
3249 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
3250 	    INVALID_WBM_RING_NUM)
3251 		return QDF_STATUS_SUCCESS;
3252 
3253 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3254 			  tx_comp_ring_size, cached)) {
3255 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3256 		goto fail1;
3257 	}
3258 
3259 	return QDF_STATUS_SUCCESS;
3260 
3261 fail1:
3262 	return QDF_STATUS_E_FAILURE;
3263 }
3264 
3265 /**
3266  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
3267  * @pdev: DP_PDEV handle
3268  *
3269  * Return: void
3270  */
3271 void
3272 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3273 {
3274 	uint8_t map_id;
3275 	struct dp_soc *soc = pdev->soc;
3276 
3277 	if (!soc)
3278 		return;
3279 
3280 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3281 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3282 			     default_dscp_tid_map,
3283 			     sizeof(default_dscp_tid_map));
3284 	}
3285 
3286 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3287 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3288 					default_dscp_tid_map,
3289 					map_id);
3290 	}
3291 }
3292 
3293 /**
3294  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
3295  * @pdev: DP_PDEV handle
3296  *
3297  * Return: void
3298  */
3299 void
3300 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3301 {
3302 	struct dp_soc *soc = pdev->soc;
3303 
3304 	if (!soc)
3305 		return;
3306 
3307 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3308 		     sizeof(default_pcp_tid_map));
3309 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3310 }
3311 
/* Non-static when UMAC HW reset support needs to call this externally */
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	/*
	 * Drain the freelist under the lock: unmap and free each queued
	 * REO HW queue descriptor, then free the list node itself.
	 */
	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	/* List and lock must only be destroyed after the drain above */
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3335 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Flag gates further additions to the deferred freelist */
	soc->reo_desc_deferred_freelist_init = true;
}

/**
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* Clear the init flag under the lock so no new entries are added
	 * while (and after) the list is drained.
	 */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	/* List and lock are destroyed only after the drain completes */
	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Deferred REO qdesc destroy compiled out: both helpers are no-ops */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3388 
3389 /**
3390  * dp_soc_reset_txrx_ring_map() - reset tx ring map
3391  * @soc: DP SOC handle
3392  *
3393  */
3394 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
3395 {
3396 	uint32_t i;
3397 
3398 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
3399 		soc->tx_ring_map[i] = 0;
3400 }
3401 
3402 /**
3403  * dp_soc_deinit() - Deinitialize txrx SOC
3404  * @txrx_soc: Opaque DP SOC handle
3405  *
3406  * Return: None
3407  */
3408 void dp_soc_deinit(void *txrx_soc)
3409 {
3410 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3411 	struct htt_soc *htt_soc = soc->htt_handle;
3412 
3413 	dp_monitor_soc_deinit(soc);
3414 
3415 	/* free peer tables & AST tables allocated during peer_map_attach */
3416 	if (soc->peer_map_attach_success) {
3417 		dp_peer_find_detach(soc);
3418 		soc->arch_ops.txrx_peer_map_detach(soc);
3419 		soc->peer_map_attach_success = FALSE;
3420 	}
3421 
3422 	qdf_flush_work(&soc->htt_stats.work);
3423 	qdf_disable_work(&soc->htt_stats.work);
3424 
3425 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3426 
3427 	dp_soc_reset_txrx_ring_map(soc);
3428 
3429 	dp_reo_desc_freelist_destroy(soc);
3430 	dp_reo_desc_deferred_freelist_destroy(soc);
3431 
3432 	DEINIT_RX_HW_STATS_LOCK(soc);
3433 
3434 	qdf_spinlock_destroy(&soc->ast_lock);
3435 
3436 	dp_peer_mec_spinlock_destroy(soc);
3437 
3438 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3439 
3440 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
3441 
3442 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3443 
3444 	qdf_spinlock_destroy(&soc->vdev_map_lock);
3445 
3446 	dp_reo_cmdlist_destroy(soc);
3447 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3448 
3449 	dp_soc_tx_desc_sw_pools_deinit(soc);
3450 
3451 	dp_soc_srng_deinit(soc);
3452 
3453 	dp_hw_link_desc_ring_deinit(soc);
3454 
3455 	dp_soc_print_inactive_objects(soc);
3456 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
3457 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
3458 
3459 	htt_soc_htc_dealloc(soc->htt_handle);
3460 
3461 	htt_soc_detach(htt_soc);
3462 
3463 	/* Free wbm sg list and reset flags in down path */
3464 	dp_rx_wbm_sg_list_deinit(soc);
3465 
3466 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
3467 			     WLAN_MD_DP_SOC, "dp_soc");
3468 }
3469 
#ifdef QCA_HOST2FW_RXBUF_RING
/* Register the RXDMA error destination ring of @lmac_id with HTT */
void
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
				int lmac_id)
{
	struct dp_srng *err_dst = &soc->rxdma_err_dst_ring[lmac_id];

	/* Skip rings that were never initialized */
	if (!err_dst->hal_srng)
		return;

	htt_srng_setup(soc->htt_handle, mac_id, err_dst->hal_srng,
		       RXDMA_DST);
}
#endif
3481 
3482 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
3483 				  enum cdp_host_reo_dest_ring *reo_dest,
3484 				  bool *hash_based)
3485 {
3486 	struct dp_soc *soc;
3487 	struct dp_pdev *pdev;
3488 
3489 	pdev = vdev->pdev;
3490 	soc = pdev->soc;
3491 	/*
3492 	 * hash based steering is disabled for Radios which are offloaded
3493 	 * to NSS
3494 	 */
3495 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3496 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3497 
3498 	/*
3499 	 * Below line of code will ensure the proper reo_dest ring is chosen
3500 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3501 	 */
3502 	*reo_dest = pdev->reo_dest;
3503 }
3504 
3505 #ifdef IPA_OFFLOAD
3506 /**
3507  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
3508  * @vdev: Virtual device
3509  *
3510  * Return: true if the vdev is of subtype P2P
3511  *	   false if the vdev is of any other subtype
3512  */
3513 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
3514 {
3515 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
3516 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
3517 	    vdev->subtype == wlan_op_subtype_p2p_go)
3518 		return true;
3519 
3520 	return false;
3521 }
3522 
3523 /**
3524  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
3525  * @vdev: Datapath VDEV handle
3526  * @setup_info:
3527  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
3528  * @hash_based: pointer to hash value (enabled/disabled) to be populated
3529  * @lmac_peer_id_msb:
3530  *
3531  * If IPA is enabled in ini, for SAP mode, disable hash based
3532  * steering, use default reo_dst ring for RX. Use config values for other modes.
3533  *
3534  * Return: None
3535  */
3536 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
3537 				       struct cdp_peer_setup_info *setup_info,
3538 				       enum cdp_host_reo_dest_ring *reo_dest,
3539 				       bool *hash_based,
3540 				       uint8_t *lmac_peer_id_msb)
3541 {
3542 	struct dp_soc *soc;
3543 	struct dp_pdev *pdev;
3544 
3545 	pdev = vdev->pdev;
3546 	soc = pdev->soc;
3547 
3548 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
3549 
3550 	/* For P2P-GO interfaces we do not need to change the REO
3551 	 * configuration even if IPA config is enabled
3552 	 */
3553 	if (dp_is_vdev_subtype_p2p(vdev))
3554 		return;
3555 
3556 	/*
3557 	 * If IPA is enabled, disable hash-based flow steering and set
3558 	 * reo_dest_ring_4 as the REO ring to receive packets on.
3559 	 * IPA is configured to reap reo_dest_ring_4.
3560 	 *
3561 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
3562 	 * value enum value is from 1 - 4.
3563 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
3564 	 */
3565 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3566 		if (dp_ipa_is_mdm_platform()) {
3567 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
3568 			if (vdev->opmode == wlan_op_mode_ap)
3569 				*hash_based = 0;
3570 		} else {
3571 			dp_debug("opt_dp: default HOST reo ring is set");
3572 		}
3573 	}
3574 }
3575 
3576 #else
3577 
3578 /**
3579  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
3580  * @vdev: Datapath VDEV handle
3581  * @setup_info:
3582  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
3583  * @hash_based: pointer to hash value (enabled/disabled) to be populated
3584  * @lmac_peer_id_msb:
3585  *
3586  * Use system config values for hash based steering.
3587  * Return: None
3588  */
3589 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
3590 				       struct cdp_peer_setup_info *setup_info,
3591 				       enum cdp_host_reo_dest_ring *reo_dest,
3592 				       bool *hash_based,
3593 				       uint8_t *lmac_peer_id_msb)
3594 {
3595 	struct dp_soc *soc = vdev->pdev->soc;
3596 
3597 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
3598 					lmac_peer_id_msb);
3599 }
3600 #endif /* IPA_OFFLOAD */
3601 
3602 /**
3603  * dp_peer_setup_wifi3() - initialize the peer
3604  * @soc_hdl: soc handle object
3605  * @vdev_id: vdev_id of vdev object
3606  * @peer_mac: Peer's mac address
3607  * @setup_info: peer setup info for MLO
3608  *
3609  * Return: QDF_STATUS
3610  */
3611 QDF_STATUS
3612 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3613 		    uint8_t *peer_mac,
3614 		    struct cdp_peer_setup_info *setup_info)
3615 {
3616 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3617 	struct dp_pdev *pdev;
3618 	bool hash_based = 0;
3619 	enum cdp_host_reo_dest_ring reo_dest;
3620 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3621 	struct dp_vdev *vdev = NULL;
3622 	struct dp_peer *peer =
3623 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3624 					       DP_MOD_ID_CDP);
3625 	struct dp_peer *mld_peer = NULL;
3626 	enum wlan_op_mode vdev_opmode;
3627 	uint8_t lmac_peer_id_msb = 0;
3628 
3629 	if (!peer)
3630 		return QDF_STATUS_E_FAILURE;
3631 
3632 	vdev = peer->vdev;
3633 	if (!vdev) {
3634 		status = QDF_STATUS_E_FAILURE;
3635 		goto fail;
3636 	}
3637 
3638 	/* save vdev related member in case vdev freed */
3639 	vdev_opmode = vdev->opmode;
3640 	pdev = vdev->pdev;
3641 	dp_peer_setup_get_reo_hash(vdev, setup_info,
3642 				   &reo_dest, &hash_based,
3643 				   &lmac_peer_id_msb);
3644 
3645 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
3646 					   peer, vdev, vdev->vdev_id,
3647 					   setup_info);
3648 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
3649 		"hash-based-steering:%d default-reo_dest:%u",
3650 		pdev->pdev_id, vdev->vdev_id,
3651 		vdev->opmode, peer,
3652 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
3653 
3654 	/*
3655 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
3656 	 * i.e both the devices have same MAC address. In these
3657 	 * cases we want such pkts to be processed in NULL Q handler
3658 	 * which is REO2TCL ring. for this reason we should
3659 	 * not setup reo_queues and default route for bss_peer.
3660 	 */
3661 	if (!IS_MLO_DP_MLD_PEER(peer))
3662 		dp_monitor_peer_tx_init(pdev, peer);
3663 
3664 	if (!setup_info)
3665 		if (dp_peer_legacy_setup(soc, peer) !=
3666 				QDF_STATUS_SUCCESS) {
3667 			status = QDF_STATUS_E_RESOURCES;
3668 			goto fail;
3669 		}
3670 
3671 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
3672 		status = QDF_STATUS_E_FAILURE;
3673 		goto fail;
3674 	}
3675 
3676 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3677 		/* TODO: Check the destination ring number to be passed to FW */
3678 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3679 				soc->ctrl_psoc,
3680 				peer->vdev->pdev->pdev_id,
3681 				peer->mac_addr.raw,
3682 				peer->vdev->vdev_id, hash_based, reo_dest,
3683 				lmac_peer_id_msb);
3684 	}
3685 
3686 	qdf_atomic_set(&peer->is_default_route_set, 1);
3687 
3688 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
3689 	if (QDF_IS_STATUS_ERROR(status)) {
3690 		dp_peer_err("peer mlo setup failed");
3691 		qdf_assert_always(0);
3692 	}
3693 
3694 	if (vdev_opmode != wlan_op_mode_monitor) {
3695 		/* In case of MLD peer, switch peer to mld peer and
3696 		 * do peer_rx_init.
3697 		 */
3698 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3699 		    IS_MLO_DP_LINK_PEER(peer)) {
3700 			if (setup_info && setup_info->is_first_link) {
3701 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
3702 				if (mld_peer)
3703 					dp_peer_rx_init(pdev, mld_peer);
3704 				else
3705 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
3706 			}
3707 		} else {
3708 			dp_peer_rx_init(pdev, peer);
3709 		}
3710 	}
3711 
3712 	if (!IS_MLO_DP_MLD_PEER(peer))
3713 		dp_peer_ppdu_delayed_ba_init(peer);
3714 
3715 fail:
3716 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3717 	return status;
3718 }
3719 
3720 /**
3721  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
3722  * @txrx_soc: cdp soc handle
3723  * @ac: Access category
3724  * @value: timeout value in millisec
3725  *
3726  * Return: void
3727  */
3728 void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3729 			     uint8_t ac, uint32_t value)
3730 {
3731 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3732 
3733 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
3734 }
3735 
3736 /**
3737  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
3738  * @txrx_soc: cdp soc handle
3739  * @ac: access category
3740  * @value: timeout value in millisec
3741  *
3742  * Return: void
3743  */
3744 void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3745 			     uint8_t ac, uint32_t *value)
3746 {
3747 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3748 
3749 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
3750 }
3751 
3752 /**
3753  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3754  * @txrx_soc: cdp soc handle
3755  * @pdev_id: id of physical device object
3756  * @val: reo destination ring index (1 - 4)
3757  *
3758  * Return: QDF_STATUS
3759  */
3760 QDF_STATUS
3761 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
3762 		     enum cdp_host_reo_dest_ring val)
3763 {
3764 	struct dp_pdev *pdev =
3765 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3766 						   pdev_id);
3767 
3768 	if (pdev) {
3769 		pdev->reo_dest = val;
3770 		return QDF_STATUS_SUCCESS;
3771 	}
3772 
3773 	return QDF_STATUS_E_FAILURE;
3774 }
3775 
3776 /**
3777  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3778  * @txrx_soc: cdp soc handle
3779  * @pdev_id: id of physical device object
3780  *
3781  * Return: reo destination ring index
3782  */
3783 enum cdp_host_reo_dest_ring
3784 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
3785 {
3786 	struct dp_pdev *pdev =
3787 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3788 						   pdev_id);
3789 
3790 	if (pdev)
3791 		return pdev->reo_dest;
3792 	else
3793 		return cdp_host_reo_dest_ring_unknown;
3794 }
3795 
3796 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
3797 	union hal_reo_status *reo_status)
3798 {
3799 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
3800 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
3801 
3802 	if (!dp_check_pdev_exists(soc, pdev)) {
3803 		dp_err_rl("pdev doesn't exist");
3804 		return;
3805 	}
3806 
3807 	if (!qdf_atomic_read(&soc->cmn_init_done))
3808 		return;
3809 
3810 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
3811 		DP_PRINT_STATS("REO stats failure %d",
3812 			       queue_status->header.status);
3813 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
3814 		return;
3815 	}
3816 
3817 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
3818 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
3819 }
3820 
3821 /**
3822  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
3823  * @soc: dp soc.
3824  * @pdev: dp pdev.
3825  *
3826  * Return: None.
3827  */
3828 void
3829 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
3830 {
3831 	uint32_t hw_head;
3832 	uint32_t hw_tail;
3833 	struct dp_srng *srng;
3834 
3835 	if (!soc) {
3836 		dp_err("soc is NULL");
3837 		return;
3838 	}
3839 
3840 	if (!pdev) {
3841 		dp_err("pdev is NULL");
3842 		return;
3843 	}
3844 
3845 	srng = &pdev->soc->wbm_idle_link_ring;
3846 	if (!srng) {
3847 		dp_err("wbm_idle_link_ring srng is NULL");
3848 		return;
3849 	}
3850 
3851 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
3852 			&hw_tail, WBM_IDLE_LINK);
3853 
3854 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
3855 		 hw_head, hw_tail);
3856 }
3857 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * dp_update_soft_irq_limits() - override per-loop tx/rx reap pkt limits
 * @soc: DP SOC handle
 * @tx_limit: new tx completion loop packet limit
 * @rx_limit: new rx reap loop packet limit
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}

#else

/* No-op when the rx softirq time limit feature is compiled out */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
3874 
3875 /**
3876  * dp_display_srng_info() - Dump the srng HP TP info
3877  * @soc_hdl: CDP Soc handle
3878  *
3879  * This function dumps the SW hp/tp values for the important rings.
3880  * HW hp/tp values are not being dumped, since it can lead to
3881  * READ NOC error when UMAC is in low power state. MCC does not have
3882  * device force wake working yet.
3883  *
3884  * Return: none
3885  */
3886 void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
3887 {
3888 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3889 	hal_soc_handle_t hal_soc = soc->hal_soc;
3890 	uint32_t hp, tp, i;
3891 
3892 	dp_info("SRNG HP-TP data:");
3893 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3894 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
3895 				&tp, &hp);
3896 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3897 
3898 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
3899 		    INVALID_WBM_RING_NUM)
3900 			continue;
3901 
3902 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
3903 				&tp, &hp);
3904 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3905 	}
3906 
3907 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3908 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
3909 				&tp, &hp);
3910 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3911 	}
3912 
3913 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
3914 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
3915 
3916 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
3917 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
3918 
3919 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
3920 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
3921 }
3922 
3923 /**
3924  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
3925  * @psoc: dp soc handle
3926  * @pdev_id: id of DP_PDEV handle
3927  * @pcp: pcp value
3928  * @tid: tid value passed by the user
3929  *
3930  * Return: QDF_STATUS_SUCCESS on success
3931  */
3932 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
3933 					 uint8_t pdev_id,
3934 					 uint8_t pcp, uint8_t tid)
3935 {
3936 	struct dp_soc *soc = (struct dp_soc *)psoc;
3937 
3938 	soc->pcp_tid_map[pcp] = tid;
3939 
3940 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
3941 	return QDF_STATUS_SUCCESS;
3942 }
3943 
3944 /**
3945  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
3946  * @soc_hdl: DP soc handle
3947  * @vdev_id: id of DP_VDEV handle
3948  * @pcp: pcp value
3949  * @tid: tid value passed by the user
3950  *
3951  * Return: QDF_STATUS_SUCCESS on success
3952  */
3953 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
3954 					 uint8_t vdev_id,
3955 					 uint8_t pcp, uint8_t tid)
3956 {
3957 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3958 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3959 						     DP_MOD_ID_CDP);
3960 
3961 	if (!vdev)
3962 		return QDF_STATUS_E_FAILURE;
3963 
3964 	vdev->pcp_tid_map[pcp] = tid;
3965 
3966 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3967 	return QDF_STATUS_SUCCESS;
3968 }
3969 
3970 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
3971 void dp_drain_txrx(struct cdp_soc_t *soc_handle)
3972 {
3973 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3974 	uint32_t cur_tx_limit, cur_rx_limit;
3975 	uint32_t budget = 0xffff;
3976 	uint32_t val;
3977 	int i;
3978 	int cpu = dp_srng_get_cpu();
3979 
3980 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
3981 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
3982 
3983 	/* Temporarily increase soft irq limits when going to drain
3984 	 * the UMAC/LMAC SRNGs and restore them after polling.
3985 	 * Though the budget is on higher side, the TX/RX reaping loops
3986 	 * will not execute longer as both TX and RX would be suspended
3987 	 * by the time this API is called.
3988 	 */
3989 	dp_update_soft_irq_limits(soc, budget, budget);
3990 
3991 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
3992 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
3993 
3994 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
3995 
3996 	/* Do a dummy read at offset 0; this will ensure all
3997 	 * pendings writes(HP/TP) are flushed before read returns.
3998 	 */
3999 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
4000 	dp_debug("Register value at offset 0: %u\n", val);
4001 }
4002 #endif
4003 
4004 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
4005 /**
4006  * dp_flush_ring_hptp() - Update ring shadow
4007  *			  register HP/TP address when runtime
4008  *                        resume
4009  * @soc: DP soc context
4010  * @hal_srng: srng
4011  *
4012  * Return: None
4013  */
4014 static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
4015 {
4016 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
4017 						 HAL_SRNG_FLUSH_EVENT)) {
4018 		/* Acquire the lock */
4019 		hal_srng_access_start(soc->hal_soc, hal_srng);
4020 
4021 		hal_srng_access_end(soc->hal_soc, hal_srng);
4022 
4023 		hal_srng_set_flush_last_ts(hal_srng);
4024 
4025 		dp_debug("flushed");
4026 	}
4027 }
4028 
4029 void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
4030 {
4031 	 uint8_t i;
4032 
4033 	if (force_flush_tx) {
4034 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4035 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
4036 					   HAL_SRNG_FLUSH_EVENT);
4037 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
4038 		}
4039 
4040 		return;
4041 	}
4042 
4043 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4044 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
4045 
4046 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
4047 }
4048 #endif
4049 
4050 #ifdef WLAN_FEATURE_STATS_EXT
4051 /* rx hw stats event wait timeout in ms */
4052 #define DP_REO_STATUS_STATS_TIMEOUT 100
4053 
4054 /**
4055  * dp_rx_hw_stats_cb() - request rx hw stats response callback
4056  * @soc: soc handle
4057  * @cb_ctxt: callback context
4058  * @reo_status: reo command response status
4059  *
4060  * Return: None
4061  */
4062 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4063 			      union hal_reo_status *reo_status)
4064 {
4065 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
4066 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
4067 	bool is_query_timeout;
4068 
4069 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4070 	is_query_timeout = rx_hw_stats->is_query_timeout;
4071 	/* free the cb_ctxt if all pending tid stats query is received */
4072 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
4073 		if (!is_query_timeout) {
4074 			qdf_event_set(&soc->rx_hw_stats_event);
4075 			soc->is_last_stats_ctx_init = false;
4076 		}
4077 
4078 		qdf_mem_free(rx_hw_stats);
4079 	}
4080 
4081 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4082 		dp_info("REO stats failure %d",
4083 			queue_status->header.status);
4084 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4085 		return;
4086 	}
4087 
4088 	if (!is_query_timeout) {
4089 		soc->ext_stats.rx_mpdu_received +=
4090 					queue_status->mpdu_frms_cnt;
4091 		soc->ext_stats.rx_mpdu_missed +=
4092 					queue_status->hole_cnt;
4093 	}
4094 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4095 }
4096 
4097 /**
4098  * dp_request_rx_hw_stats() - request rx hardware stats
4099  * @soc_hdl: soc handle
4100  * @vdev_id: vdev id
4101  *
4102  * Return: None
4103  */
4104 QDF_STATUS
4105 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
4106 {
4107 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4108 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4109 						     DP_MOD_ID_CDP);
4110 	struct dp_peer *peer = NULL;
4111 	QDF_STATUS status;
4112 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
4113 	int rx_stats_sent_cnt = 0;
4114 	uint32_t last_rx_mpdu_received;
4115 	uint32_t last_rx_mpdu_missed;
4116 
4117 	if (!vdev) {
4118 		dp_err("vdev is null for vdev_id: %u", vdev_id);
4119 		status = QDF_STATUS_E_INVAL;
4120 		goto out;
4121 	}
4122 
4123 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
4124 
4125 	if (!peer) {
4126 		dp_err("Peer is NULL");
4127 		status = QDF_STATUS_E_INVAL;
4128 		goto out;
4129 	}
4130 
4131 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
4132 
4133 	if (!rx_hw_stats) {
4134 		dp_err("malloc failed for hw stats structure");
4135 		status = QDF_STATUS_E_INVAL;
4136 		goto out;
4137 	}
4138 
4139 	qdf_event_reset(&soc->rx_hw_stats_event);
4140 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4141 	/* save the last soc cumulative stats and reset it to 0 */
4142 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
4143 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
4144 	soc->ext_stats.rx_mpdu_received = 0;
4145 	soc->ext_stats.rx_mpdu_missed = 0;
4146 
4147 	dp_debug("HW stats query start");
4148 	rx_stats_sent_cnt =
4149 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
4150 	if (!rx_stats_sent_cnt) {
4151 		dp_err("no tid stats sent successfully");
4152 		qdf_mem_free(rx_hw_stats);
4153 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4154 		status = QDF_STATUS_E_INVAL;
4155 		goto out;
4156 	}
4157 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
4158 		       rx_stats_sent_cnt);
4159 	rx_hw_stats->is_query_timeout = false;
4160 	soc->is_last_stats_ctx_init = true;
4161 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4162 
4163 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
4164 				       DP_REO_STATUS_STATS_TIMEOUT);
4165 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
4166 
4167 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4168 	if (status != QDF_STATUS_SUCCESS) {
4169 		dp_info("partial rx hw stats event collected with %d",
4170 			qdf_atomic_read(
4171 				&rx_hw_stats->pending_tid_stats_cnt));
4172 		if (soc->is_last_stats_ctx_init)
4173 			rx_hw_stats->is_query_timeout = true;
4174 		/*
4175 		 * If query timeout happened, use the last saved stats
4176 		 * for this time query.
4177 		 */
4178 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
4179 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
4180 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
4181 
4182 	}
4183 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4184 
4185 out:
4186 	if (peer)
4187 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4188 	if (vdev)
4189 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4190 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
4191 
4192 	return status;
4193 }
4194 
4195 /**
4196  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
4197  * @soc_hdl: soc handle
4198  *
4199  * Return: None
4200  */
4201 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
4202 {
4203 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4204 
4205 	soc->ext_stats.rx_mpdu_received = 0;
4206 	soc->ext_stats.rx_mpdu_missed = 0;
4207 }
4208 #endif /* WLAN_FEATURE_STATS_EXT */
4209 
4210 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
4211 {
4212 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4213 
4214 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
4215 }
4216 
4217 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
4218 {
4219 	uint32_t i;
4220 
4221 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4222 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
4223 	}
4224 }
4225 
4226 qdf_export_symbol(dp_soc_set_txrx_ring_map);
4227 
/**
 * dp_soc_cfg_dump() - log the target-specific soc configuration
 * @soc: dp soc handle
 * @target_type: target type id as returned by hal_get_target_type()
 *
 * Return: None
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
		     soc->ast_override_support, soc->da_war_enabled);

	/* Dump the wlan_cfg-level soc context as well */
	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
4236 
4237 /**
4238  * dp_soc_cfg_init() - initialize target specific configuration
4239  *		       during dp_soc_init
4240  * @soc: dp soc handle
4241  */
4242 static void dp_soc_cfg_init(struct dp_soc *soc)
4243 {
4244 	uint32_t target_type;
4245 
4246 	target_type = hal_get_target_type(soc->hal_soc);
4247 	switch (target_type) {
4248 	case TARGET_TYPE_QCA6290:
4249 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4250 					       REO_DST_RING_SIZE_QCA6290);
4251 		soc->ast_override_support = 1;
4252 		soc->da_war_enabled = false;
4253 		break;
4254 	case TARGET_TYPE_QCA6390:
4255 	case TARGET_TYPE_QCA6490:
4256 	case TARGET_TYPE_QCA6750:
4257 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4258 					       REO_DST_RING_SIZE_QCA6290);
4259 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4260 		soc->ast_override_support = 1;
4261 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4262 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4263 		    QDF_GLOBAL_MONITOR_MODE) {
4264 			int int_ctx;
4265 
4266 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
4267 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4268 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4269 			}
4270 		}
4271 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4272 		break;
4273 	case TARGET_TYPE_KIWI:
4274 	case TARGET_TYPE_MANGO:
4275 	case TARGET_TYPE_PEACH:
4276 		soc->ast_override_support = 1;
4277 		soc->per_tid_basize_max_tid = 8;
4278 
4279 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4280 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4281 		    QDF_GLOBAL_MONITOR_MODE) {
4282 			int int_ctx;
4283 
4284 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
4285 			     int_ctx++) {
4286 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4287 				if (dp_is_monitor_mode_using_poll(soc))
4288 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4289 			}
4290 		}
4291 
4292 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4293 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
4294 		break;
4295 	case TARGET_TYPE_QCA8074:
4296 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4297 		soc->da_war_enabled = true;
4298 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4299 		break;
4300 	case TARGET_TYPE_QCA8074V2:
4301 	case TARGET_TYPE_QCA6018:
4302 	case TARGET_TYPE_QCA9574:
4303 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4304 		soc->ast_override_support = 1;
4305 		soc->per_tid_basize_max_tid = 8;
4306 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4307 		soc->da_war_enabled = false;
4308 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4309 		break;
4310 	case TARGET_TYPE_QCN9000:
4311 		soc->ast_override_support = 1;
4312 		soc->da_war_enabled = false;
4313 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4314 		soc->per_tid_basize_max_tid = 8;
4315 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4316 		soc->lmac_polled_mode = 0;
4317 		soc->wbm_release_desc_rx_sg_support = 1;
4318 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4319 		break;
4320 	case TARGET_TYPE_QCA5018:
4321 	case TARGET_TYPE_QCN6122:
4322 	case TARGET_TYPE_QCN9160:
4323 		soc->ast_override_support = 1;
4324 		soc->da_war_enabled = false;
4325 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4326 		soc->per_tid_basize_max_tid = 8;
4327 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
4328 		soc->disable_mac1_intr = 1;
4329 		soc->disable_mac2_intr = 1;
4330 		soc->wbm_release_desc_rx_sg_support = 1;
4331 		break;
4332 	case TARGET_TYPE_QCN9224:
4333 		soc->ast_override_support = 1;
4334 		soc->da_war_enabled = false;
4335 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4336 		soc->per_tid_basize_max_tid = 8;
4337 		soc->wbm_release_desc_rx_sg_support = 1;
4338 		soc->rxdma2sw_rings_not_supported = 1;
4339 		soc->wbm_sg_last_msdu_war = 1;
4340 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4341 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4342 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4343 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4344 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4345 						  CFG_DP_HOST_AST_DB_ENABLE);
4346 		soc->features.wds_ext_ast_override_enable = true;
4347 		break;
4348 	case TARGET_TYPE_QCA5332:
4349 	case TARGET_TYPE_QCN6432:
4350 		soc->ast_override_support = 1;
4351 		soc->da_war_enabled = false;
4352 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4353 		soc->per_tid_basize_max_tid = 8;
4354 		soc->wbm_release_desc_rx_sg_support = 1;
4355 		soc->rxdma2sw_rings_not_supported = 1;
4356 		soc->wbm_sg_last_msdu_war = 1;
4357 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4358 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4359 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
4360 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4361 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4362 						  CFG_DP_HOST_AST_DB_ENABLE);
4363 		soc->features.wds_ext_ast_override_enable = true;
4364 		break;
4365 	default:
4366 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4367 		qdf_assert_always(0);
4368 		break;
4369 	}
4370 	dp_soc_cfg_dump(soc, target_type);
4371 }
4372 
4373 /**
4374  * dp_soc_init() - Initialize txrx SOC
4375  * @soc: Opaque DP SOC handle
4376  * @htc_handle: Opaque HTC handle
4377  * @hif_handle: Opaque HIF handle
4378  *
4379  * Return: DP SOC handle on success, NULL on failure
4380  */
4381 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
4382 		  struct hif_opaque_softc *hif_handle)
4383 {
4384 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
4385 	bool is_monitor_mode = false;
4386 	uint8_t i;
4387 	int num_dp_msi;
4388 	bool ppeds_attached = false;
4389 
4390 	htt_soc = htt_soc_attach(soc, htc_handle);
4391 	if (!htt_soc)
4392 		goto fail1;
4393 
4394 	soc->htt_handle = htt_soc;
4395 
4396 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
4397 		goto fail2;
4398 
4399 	htt_set_htc_handle(htt_soc, htc_handle);
4400 
4401 	dp_soc_cfg_init(soc);
4402 
4403 	dp_monitor_soc_cfg_init(soc);
4404 	/* Reset/Initialize wbm sg list and flags */
4405 	dp_rx_wbm_sg_list_reset(soc);
4406 
4407 	/* Note: Any SRNG ring initialization should happen only after
4408 	 * Interrupt mode is set and followed by filling up the
4409 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
4410 	 */
4411 	dp_soc_set_interrupt_mode(soc);
4412 	if (soc->cdp_soc.ol_ops->get_con_mode &&
4413 	    soc->cdp_soc.ol_ops->get_con_mode() ==
4414 	    QDF_GLOBAL_MONITOR_MODE) {
4415 		is_monitor_mode = true;
4416 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
4417 	} else {
4418 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
4419 	}
4420 
4421 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
4422 	if (num_dp_msi < 0) {
4423 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
4424 		goto fail3;
4425 	}
4426 
4427 	if (soc->arch_ops.ppeds_handle_attached)
4428 		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
4429 
4430 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
4431 				     soc->intr_mode, is_monitor_mode,
4432 				     ppeds_attached);
4433 
4434 	/* initialize WBM_IDLE_LINK ring */
4435 	if (dp_hw_link_desc_ring_init(soc)) {
4436 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
4437 		goto fail3;
4438 	}
4439 
4440 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
4441 
4442 	if (dp_soc_srng_init(soc)) {
4443 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
4444 		goto fail4;
4445 	}
4446 
4447 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
4448 			       htt_get_htc_handle(htt_soc),
4449 			       soc->hal_soc, soc->osdev) == NULL)
4450 		goto fail5;
4451 
4452 	/* Initialize descriptors in TCL Rings */
4453 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4454 		hal_tx_init_data_ring(soc->hal_soc,
4455 				      soc->tcl_data_ring[i].hal_srng);
4456 	}
4457 
4458 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
4459 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
4460 		goto fail6;
4461 	}
4462 
4463 	if (soc->arch_ops.txrx_soc_ppeds_start) {
4464 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
4465 			dp_init_err("%pK: ppeds start failed", soc);
4466 			goto fail7;
4467 		}
4468 	}
4469 
4470 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
4471 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
4472 	soc->cce_disable = false;
4473 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
4474 
4475 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
4476 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
4477 	qdf_spinlock_create(&soc->vdev_map_lock);
4478 	qdf_atomic_init(&soc->num_tx_outstanding);
4479 	qdf_atomic_init(&soc->num_tx_exception);
4480 	soc->num_tx_allowed =
4481 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
4482 	soc->num_tx_spl_allowed =
4483 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
4484 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
4485 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
4486 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4487 				CDP_CFG_MAX_PEER_ID);
4488 
4489 		if (ret != -EINVAL)
4490 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
4491 
4492 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4493 				CDP_CFG_CCE_DISABLE);
4494 		if (ret == 1)
4495 			soc->cce_disable = true;
4496 	}
4497 
4498 	/*
4499 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
4500 	 * and IPQ5018 WMAC2 is not there in these platforms.
4501 	 */
4502 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
4503 	    soc->disable_mac2_intr)
4504 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
4505 
4506 	/*
4507 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
4508 	 * WMAC1 is not there in this platform.
4509 	 */
4510 	if (soc->disable_mac1_intr)
4511 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
4512 
4513 	/* setup the global rx defrag waitlist */
4514 	TAILQ_INIT(&soc->rx.defrag.waitlist);
4515 	soc->rx.defrag.timeout_ms =
4516 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
4517 	soc->rx.defrag.next_flush_ms = 0;
4518 	soc->rx.flags.defrag_timeout_check =
4519 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
4520 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
4521 
4522 	dp_monitor_soc_init(soc);
4523 
4524 	qdf_atomic_set(&soc->cmn_init_done, 1);
4525 
4526 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
4527 
4528 	qdf_spinlock_create(&soc->ast_lock);
4529 	dp_peer_mec_spinlock_create(soc);
4530 
4531 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
4532 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
4533 	INIT_RX_HW_STATS_LOCK(soc);
4534 
4535 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
4536 	/* fill the tx/rx cpu ring map*/
4537 	dp_soc_set_txrx_ring_map(soc);
4538 
4539 	TAILQ_INIT(&soc->inactive_peer_list);
4540 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
4541 	TAILQ_INIT(&soc->inactive_vdev_list);
4542 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
4543 	qdf_spinlock_create(&soc->htt_stats.lock);
4544 	/* initialize work queue for stats processing */
4545 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4546 
4547 	dp_reo_desc_deferred_freelist_create(soc);
4548 
4549 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
4550 		qdf_dma_mem_stats_read(),
4551 		qdf_heap_mem_stats_read(),
4552 		qdf_skb_total_mem_stats_read());
4553 
4554 	soc->vdev_stats_id_map = 0;
4555 
4556 	return soc;
4557 fail7:
4558 	dp_soc_tx_desc_sw_pools_deinit(soc);
4559 fail6:
4560 	htt_soc_htc_dealloc(soc->htt_handle);
4561 fail5:
4562 	dp_soc_srng_deinit(soc);
4563 fail4:
4564 	dp_hw_link_desc_ring_deinit(soc);
4565 fail3:
4566 	htt_htc_pkt_pool_free(htt_soc);
4567 fail2:
4568 	htt_soc_detach(htt_soc);
4569 fail1:
4570 	return NULL;
4571 }
4572 
4573 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/**
 * dp_soc_tcl_cmd_cred_srng_init() - initialize the TCL command/credit
 *				     ring, when enabled for this soc
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success or when the ring is disabled,
 *	   error status from dp_srng_init() otherwise
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	if (soc->init_tcl_cmd_cred_ring) {
		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
				       TCL_CMD_CREDIT, 0, 0);
		if (QDF_IS_STATUS_ERROR(status))
			return status;

		/* NOTE(review): the "wbm_desc_rel_ring" label below looks
		 * copy-pasted from the WBM desc release ring; if renamed,
		 * the matching wlan_minidump_remove() in
		 * dp_soc_tcl_cmd_cred_srng_deinit() must change in lockstep.
		 */
		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				  soc->tcl_cmd_credit_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "wbm_desc_rel_ring");
	}

	return QDF_STATUS_SUCCESS;
}
4593 
/**
 * dp_soc_tcl_cmd_cred_srng_deinit() - de-initialize the TCL
 *				       command/credit ring, when enabled
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring) {
		/* NOTE(review): "wbm_desc_rel_ring" mirrors the label used
		 * at wlan_minidump_log() time; looks copy-pasted but the
		 * two must stay in sync.
		 */
		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				     soc->tcl_cmd_credit_ring.alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "wbm_desc_rel_ring");
		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
			       TCL_CMD_CREDIT, 0);
	}
}
4605 
4606 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
4607 {
4608 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4609 	uint32_t entries;
4610 	QDF_STATUS status;
4611 
4612 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
4613 	if (soc->init_tcl_cmd_cred_ring) {
4614 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
4615 				       TCL_CMD_CREDIT, entries, 0);
4616 		if (QDF_IS_STATUS_ERROR(status))
4617 			return status;
4618 	}
4619 
4620 	return QDF_STATUS_SUCCESS;
4621 }
4622 
4623 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
4624 {
4625 	if (soc->init_tcl_cmd_cred_ring)
4626 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
4627 }
4628 
4629 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
4630 {
4631 	if (soc->init_tcl_cmd_cred_ring)
4632 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
4633 					    soc->tcl_cmd_credit_ring.hal_srng);
4634 }
4635 #else
/* Stub: TCL command/credit ring compiled out
 * (WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG defined).
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4640 
/* Stub: nothing to de-initialize when the ring is compiled out */
static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}
4644 
/* Stub: no allocation needed when the ring is compiled out */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4649 
/* Stub: nothing to free when the ring is compiled out */
static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}
4653 
/* Stub: no HAL init needed when the ring is compiled out */
inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
4657 #endif
4658 
4659 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/**
 * dp_soc_tcl_status_srng_init() - initialize the TCL status ring
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status from
 *	   dp_srng_init() otherwise
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* NOTE(review): the "wbm_desc_rel_ring" label below looks
	 * copy-pasted from the WBM desc release ring; if renamed, the
	 * matching wlan_minidump_remove() in
	 * dp_soc_tcl_status_srng_deinit() must change in lockstep.
	 */
	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
			  soc->tcl_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "wbm_desc_rel_ring");

	return QDF_STATUS_SUCCESS;
}
4676 
/**
 * dp_soc_tcl_status_srng_deinit() - de-initialize the TCL status ring
 * @soc: Datapath soc handle
 *
 * Return: None
 */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
	/* Label must match the one used by wlan_minidump_log() at init */
	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
			     soc->tcl_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
}
4685 
4686 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
4687 {
4688 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4689 	uint32_t entries;
4690 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4691 
4692 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
4693 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
4694 			       TCL_STATUS, entries, 0);
4695 
4696 	return status;
4697 }
4698 
/* Free the memory backing the TCL status ring */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
4703 #else
/* Stub: TCL status ring compiled out (WLAN_DP_DISABLE_TCL_STATUS_SRNG) */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4708 
/* Stub: nothing to de-initialize when the ring is compiled out */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}
4712 
/* Stub: no allocation needed when the ring is compiled out */
static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4717 
/* Stub: nothing to free when the ring is compiled out */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
4721 #endif
4722 
4723 /**
4724  * dp_soc_srng_deinit() - de-initialize soc srng rings
4725  * @soc: Datapath soc handle
4726  *
4727  */
4728 void dp_soc_srng_deinit(struct dp_soc *soc)
4729 {
4730 	uint32_t i;
4731 
4732 	if (soc->arch_ops.txrx_soc_srng_deinit)
4733 		soc->arch_ops.txrx_soc_srng_deinit(soc);
4734 
4735 	/* Free the ring memories */
4736 	/* Common rings */
4737 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4738 			     soc->wbm_desc_rel_ring.alloc_size,
4739 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
4740 			     "wbm_desc_rel_ring");
4741 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4742 
4743 	/* Tx data rings */
4744 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4745 		dp_deinit_tx_pair_by_index(soc, i);
4746 
4747 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4748 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4749 		dp_ipa_deinit_alt_tx_ring(soc);
4750 	}
4751 
4752 	/* TCL command and status rings */
4753 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
4754 	dp_soc_tcl_status_srng_deinit(soc);
4755 
4756 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4757 		/* TODO: Get number of rings and ring sizes
4758 		 * from wlan_cfg
4759 		 */
4760 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
4761 				     soc->reo_dest_ring[i].alloc_size,
4762 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
4763 				     "reo_dest_ring");
4764 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
4765 	}
4766 
4767 	/* REO reinjection ring */
4768 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
4769 			     soc->reo_reinject_ring.alloc_size,
4770 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
4771 			     "reo_reinject_ring");
4772 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4773 
4774 	/* Rx release ring */
4775 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
4776 			     soc->rx_rel_ring.alloc_size,
4777 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
4778 			     "reo_release_ring");
4779 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4780 
4781 	/* Rx exception ring */
4782 	/* TODO: Better to store ring_type and ring_num in
4783 	 * dp_srng during setup
4784 	 */
4785 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
4786 			     soc->reo_exception_ring.alloc_size,
4787 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
4788 			     "reo_exception_ring");
4789 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4790 
4791 	/* REO command and status rings */
4792 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
4793 			     soc->reo_cmd_ring.alloc_size,
4794 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
4795 			     "reo_cmd_ring");
4796 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4797 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
4798 			     soc->reo_status_ring.alloc_size,
4799 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
4800 			     "reo_status_ring");
4801 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4802 }
4803 
4804 /**
4805  * dp_soc_srng_init() - Initialize soc level srng rings
4806  * @soc: Datapath soc handle
4807  *
4808  * Return: QDF_STATUS_SUCCESS on success
4809  *	   QDF_STATUS_E_FAILURE on failure
4810  */
4811 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
4812 {
4813 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4814 	uint8_t i;
4815 	uint8_t wbm2_sw_rx_rel_ring_id;
4816 
4817 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4818 
4819 	dp_enable_verbose_debug(soc);
4820 
4821 	/* WBM descriptor release ring */
4822 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
4823 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
4824 		goto fail1;
4825 	}
4826 
4827 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4828 			  soc->wbm_desc_rel_ring.alloc_size,
4829 			  soc->ctrl_psoc,
4830 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
4831 			  "wbm_desc_rel_ring");
4832 
4833 	/* TCL command and status rings */
4834 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
4835 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
4836 		goto fail1;
4837 	}
4838 
4839 	if (dp_soc_tcl_status_srng_init(soc)) {
4840 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
4841 		goto fail1;
4842 	}
4843 
4844 	/* REO reinjection ring */
4845 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
4846 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
4847 		goto fail1;
4848 	}
4849 
4850 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
4851 			  soc->reo_reinject_ring.alloc_size,
4852 			  soc->ctrl_psoc,
4853 			  WLAN_MD_DP_SRNG_REO_REINJECT,
4854 			  "reo_reinject_ring");
4855 
4856 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
4857 	/* Rx release ring */
4858 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4859 			 wbm2_sw_rx_rel_ring_id, 0)) {
4860 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
4861 		goto fail1;
4862 	}
4863 
4864 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
4865 			  soc->rx_rel_ring.alloc_size,
4866 			  soc->ctrl_psoc,
4867 			  WLAN_MD_DP_SRNG_RX_REL,
4868 			  "reo_release_ring");
4869 
4870 	/* Rx exception ring */
4871 	if (dp_srng_init(soc, &soc->reo_exception_ring,
4872 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
4873 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
4874 		goto fail1;
4875 	}
4876 
4877 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
4878 			  soc->reo_exception_ring.alloc_size,
4879 			  soc->ctrl_psoc,
4880 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
4881 			  "reo_exception_ring");
4882 
4883 	/* REO command and status rings */
4884 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
4885 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
4886 		goto fail1;
4887 	}
4888 
4889 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
4890 			  soc->reo_cmd_ring.alloc_size,
4891 			  soc->ctrl_psoc,
4892 			  WLAN_MD_DP_SRNG_REO_CMD,
4893 			  "reo_cmd_ring");
4894 
4895 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
4896 	TAILQ_INIT(&soc->rx.reo_cmd_list);
4897 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
4898 
4899 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
4900 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
4901 		goto fail1;
4902 	}
4903 
4904 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
4905 			  soc->reo_status_ring.alloc_size,
4906 			  soc->ctrl_psoc,
4907 			  WLAN_MD_DP_SRNG_REO_STATUS,
4908 			  "reo_status_ring");
4909 
4910 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4911 		if (dp_init_tx_ring_pair_by_index(soc, i))
4912 			goto fail1;
4913 	}
4914 
4915 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4916 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4917 			goto fail1;
4918 
4919 		if (dp_ipa_init_alt_tx_ring(soc))
4920 			goto fail1;
4921 	}
4922 
4923 	dp_create_ext_stats_event(soc);
4924 
4925 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4926 		/* Initialize REO destination ring */
4927 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
4928 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
4929 			goto fail1;
4930 		}
4931 
4932 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
4933 				  soc->reo_dest_ring[i].alloc_size,
4934 				  soc->ctrl_psoc,
4935 				  WLAN_MD_DP_SRNG_REO_DEST,
4936 				  "reo_dest_ring");
4937 	}
4938 
4939 	if (soc->arch_ops.txrx_soc_srng_init) {
4940 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
4941 			dp_init_err("%pK: dp_srng_init failed for arch rings",
4942 				    soc);
4943 			goto fail1;
4944 		}
4945 	}
4946 
4947 	return QDF_STATUS_SUCCESS;
4948 fail1:
4949 	/*
4950 	 * Cleanup will be done as part of soc_detach, which will
4951 	 * be called on pdev attach failure
4952 	 */
4953 	dp_soc_srng_deinit(soc);
4954 	return QDF_STATUS_E_FAILURE;
4955 }
4956 
4957 /**
4958  * dp_soc_srng_free() - free soc level srng rings
4959  * @soc: Datapath soc handle
4960  *
4961  */
4962 void dp_soc_srng_free(struct dp_soc *soc)
4963 {
4964 	uint32_t i;
4965 
4966 	if (soc->arch_ops.txrx_soc_srng_free)
4967 		soc->arch_ops.txrx_soc_srng_free(soc);
4968 
4969 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
4970 
4971 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4972 		dp_free_tx_ring_pair_by_index(soc, i);
4973 
4974 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
4975 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4976 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4977 		dp_ipa_free_alt_tx_ring(soc);
4978 	}
4979 
4980 	dp_soc_tcl_cmd_cred_srng_free(soc);
4981 	dp_soc_tcl_status_srng_free(soc);
4982 
4983 	for (i = 0; i < soc->num_reo_dest_rings; i++)
4984 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
4985 
4986 	dp_srng_free(soc, &soc->reo_reinject_ring);
4987 	dp_srng_free(soc, &soc->rx_rel_ring);
4988 
4989 	dp_srng_free(soc, &soc->reo_exception_ring);
4990 
4991 	dp_srng_free(soc, &soc->reo_cmd_ring);
4992 	dp_srng_free(soc, &soc->reo_status_ring);
4993 }
4994 
4995 /**
4996  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
4997  * @soc: Datapath soc handle
4998  *
4999  * Return: QDF_STATUS_SUCCESS on success
5000  *	   QDF_STATUS_E_NOMEM on failure
5001  */
5002 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
5003 {
5004 	uint32_t entries;
5005 	uint32_t i;
5006 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5007 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
5008 	uint32_t reo_dst_ring_size;
5009 
5010 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5011 
5012 	/* sw2wbm link descriptor release ring */
5013 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
5014 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
5015 			  entries, 0)) {
5016 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
5017 		goto fail1;
5018 	}
5019 
5020 	/* TCL command and status rings */
5021 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
5022 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
5023 		goto fail1;
5024 	}
5025 
5026 	if (dp_soc_tcl_status_srng_alloc(soc)) {
5027 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
5028 		goto fail1;
5029 	}
5030 
5031 	/* REO reinjection ring */
5032 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
5033 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
5034 			  entries, 0)) {
5035 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
5036 		goto fail1;
5037 	}
5038 
5039 	/* Rx release ring */
5040 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
5041 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
5042 			  entries, 0)) {
5043 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
5044 		goto fail1;
5045 	}
5046 
5047 	/* Rx exception ring */
5048 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
5049 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
5050 			  entries, 0)) {
5051 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
5052 		goto fail1;
5053 	}
5054 
5055 	/* REO command and status rings */
5056 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
5057 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
5058 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
5059 		goto fail1;
5060 	}
5061 
5062 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
5063 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
5064 			  entries, 0)) {
5065 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
5066 		goto fail1;
5067 	}
5068 
5069 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
5070 
5071 	/* Disable cached desc if NSS offload is enabled */
5072 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
5073 		cached = 0;
5074 
5075 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
5076 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
5077 			goto fail1;
5078 	}
5079 
5080 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
5081 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5082 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
5083 			goto fail1;
5084 
5085 		if (dp_ipa_alloc_alt_tx_ring(soc))
5086 			goto fail1;
5087 	}
5088 
5089 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
5090 		/* Setup REO destination ring */
5091 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
5092 				  reo_dst_ring_size, cached)) {
5093 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
5094 			goto fail1;
5095 		}
5096 	}
5097 
5098 	if (soc->arch_ops.txrx_soc_srng_alloc) {
5099 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
5100 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
5101 				    soc);
5102 			goto fail1;
5103 		}
5104 	}
5105 
5106 	return QDF_STATUS_SUCCESS;
5107 
5108 fail1:
5109 	dp_soc_srng_free(soc);
5110 	return QDF_STATUS_E_NOMEM;
5111 }
5112 
5113 /**
5114  * dp_soc_cfg_attach() - set target specific configuration in
5115  *			 dp soc cfg.
5116  * @soc: dp soc handle
5117  */
5118 void dp_soc_cfg_attach(struct dp_soc *soc)
5119 {
5120 	int target_type;
5121 	int nss_cfg = 0;
5122 
5123 	target_type = hal_get_target_type(soc->hal_soc);
5124 	switch (target_type) {
5125 	case TARGET_TYPE_QCA6290:
5126 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5127 					       REO_DST_RING_SIZE_QCA6290);
5128 		break;
5129 	case TARGET_TYPE_QCA6390:
5130 	case TARGET_TYPE_QCA6490:
5131 	case TARGET_TYPE_QCA6750:
5132 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5133 					       REO_DST_RING_SIZE_QCA6290);
5134 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5135 		break;
5136 	case TARGET_TYPE_KIWI:
5137 	case TARGET_TYPE_MANGO:
5138 	case TARGET_TYPE_PEACH:
5139 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5140 		break;
5141 	case TARGET_TYPE_QCA8074:
5142 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5143 		break;
5144 	case TARGET_TYPE_QCA8074V2:
5145 	case TARGET_TYPE_QCA6018:
5146 	case TARGET_TYPE_QCA9574:
5147 	case TARGET_TYPE_QCN6122:
5148 	case TARGET_TYPE_QCA5018:
5149 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5150 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5151 		break;
5152 	case TARGET_TYPE_QCN9160:
5153 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5154 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5155 		break;
5156 	case TARGET_TYPE_QCN9000:
5157 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5158 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5159 		break;
5160 	case TARGET_TYPE_QCN9224:
5161 	case TARGET_TYPE_QCA5332:
5162 	case TARGET_TYPE_QCN6432:
5163 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5164 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5165 		break;
5166 	default:
5167 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
5168 		qdf_assert_always(0);
5169 		break;
5170 	}
5171 
5172 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
5173 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
5174 
5175 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
5176 
5177 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
5178 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
5179 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
5180 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
5181 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
5182 		soc->init_tcl_cmd_cred_ring = false;
5183 		soc->num_tcl_data_rings =
5184 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
5185 		soc->num_reo_dest_rings =
5186 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
5187 
5188 	} else {
5189 		soc->init_tcl_cmd_cred_ring = true;
5190 		soc->num_tx_comp_rings =
5191 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
5192 		soc->num_tcl_data_rings =
5193 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
5194 		soc->num_reo_dest_rings =
5195 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
5196 	}
5197 
5198 }
5199 
5200 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
5201 {
5202 	struct dp_soc *soc = pdev->soc;
5203 
5204 	switch (pdev->pdev_id) {
5205 	case 0:
5206 		pdev->reo_dest =
5207 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
5208 		break;
5209 
5210 	case 1:
5211 		pdev->reo_dest =
5212 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
5213 		break;
5214 
5215 	case 2:
5216 		pdev->reo_dest =
5217 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
5218 		break;
5219 
5220 	default:
5221 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
5222 			    soc, pdev->pdev_id);
5223 		break;
5224 	}
5225 }
5226 
5227