xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c (revision cbe81707988efe2ae91b3ee68cb9464251d5e597)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 
60 #ifdef WLAN_FEATURE_STATS_EXT
61 #define INIT_RX_HW_STATS_LOCK(_soc) \
62 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
63 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
64 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
65 #else
66 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
67 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
68 #endif
69 
70 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
71 #define TXCOMP_RING4_NUM 3
72 #else
73 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
74 #endif
75 
76 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
77 						uint8_t index);
78 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
79 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
80 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
81 						 uint8_t index);
82 
/* default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Indexed by the full 6-bit DSCP value; each group of eight consecutive
 * codepoints maps to one TID, i.e. the TID equals the three
 * most-significant DSCP bits:
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
105 
/* default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity map: the 3-bit 802.1p PCP value is used directly as the TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
121 
/*
 * dp_cpu_ring_map - per-interrupt-context ring map used with NSS offload
 * configurations. Rows are indexed by the NSS CPU ring map id (bounded by
 * DP_NSS_CPU_RING_MAP_MAX), columns by interrupt context.
 * NOTE(review): the exact semantics of each row depend on the NSS config
 * enum defined elsewhere — confirm against the DP_NSS_CPU_RING_MAP_MAX
 * definition before changing entries. Exported for use by other DP modules.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
135 
136 /**
137  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
138  * @soc: DP soc handle
139  * @ring_type: ring type
140  * @ring_num: ring_num
141  *
142  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
143  */
144 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
145 					    enum hal_ring_type ring_type,
146 					    int ring_num)
147 {
148 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
149 	uint8_t status = 0;
150 
151 	switch (ring_type) {
152 	case WBM2SW_RELEASE:
153 	case REO_DST:
154 	case RXDMA_BUF:
155 	case REO_EXCEPTION:
156 		status = ((nss_config) & (1 << ring_num));
157 		break;
158 	default:
159 		break;
160 	}
161 
162 	return status;
163 }
164 
165 /* MCL specific functions */
166 #if defined(DP_CON_MON)
167 
#ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With DP_CON_MON_MSI_ENABLED, monitor rings are serviced from the MSI
 * interrupt context as well, so return the configured rx monitor ring
 * mask for this interrupt context (unlike the non-MSI variant, which
 * returns 0 so monitor rings are only reaped from timer context).
 *
 * Return: rx mon ring mask configured for @intr_ctx_num
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function is returning 0, since in interrupt mode(softirq based RX),
 * we donot want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would be
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
#endif
211 
212 #else
213 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * On non-MCL (!DP_CON_MON) targets monitor rings are serviced in the
 * regular interrupt path, so the configured mask is returned directly.
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc,
						int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
227 
228 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
229 {
230 	int i;
231 
232 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
233 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
234 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
235 	}
236 }
237 
238 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
239 
/**
 * dp_service_lmac_rings() - timer-context service routine for LMAC rings
 * @arg: opaque pointer to the dp_soc
 *
 * For each LMAC with a valid pdev: runs monitor processing, drains the
 * rxdma error ring from every interrupt context, and replenishes the RX
 * refill buffer ring when RXDMA_BUF is not offloaded to NSS. Re-arms the
 * lmac reap timer before returning.
 */
void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Host refills the ring only when RXDMA_BUF is not
		 * offloaded to NSS for this mac.
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
278 
279 #endif
280 
281 /**
282  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
283  * @ring_num: ring num of the ring being queried
284  * @grp_mask: the grp_mask array for the ring type in question.
285  *
286  * The grp_mask array is indexed by group number and the bit fields correspond
287  * to ring numbers.  We are finding which interrupt group a ring belongs to.
288  *
289  * Return: the index in the grp_mask array with the ring number.
290  * -QDF_STATUS_E_NOENT if no entry is found
291  */
292 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
293 {
294 	int ext_group_num;
295 	uint8_t mask = 1 << ring_num;
296 
297 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
298 	     ext_group_num++) {
299 		if (mask & grp_mask[ext_group_num])
300 			return ext_group_num;
301 	}
302 
303 	return -QDF_STATUS_E_NOENT;
304 }
305 
306 /**
307  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
308  * @soc: dp_soc
309  * @msi_group_number: MSI group number.
310  * @msi_data_count: MSI data count.
311  *
312  * Return: true if msi_group_number is invalid.
313  */
314 static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
315 					   int msi_group_number,
316 					   int msi_data_count)
317 {
318 	if (soc && soc->osdev && soc->osdev->dev &&
319 	    pld_is_one_msi(soc->osdev->dev))
320 		return false;
321 
322 	return msi_group_number > msi_data_count;
323 }
324 
325 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
 *				rx_near_full_grp1 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: non-zero (the ring's bit in WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1)
 *	   if the ring_num belongs to reo_nf_grp1, 0 otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
}
340 
/**
 * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
 *				rx_near_full_grp2 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: non-zero (the ring's bit in WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2)
 *	   if the ring_num belongs to reo_nf_grp2, 0 otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
}
355 
/**
 * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
 *				ring type and number
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring num
 *
 * Return: near-full irq mask pointer, or NULL when the ring type/number
 *	   has no near-full interrupt group configured
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;
	uint8_t *nf_irq_mask = NULL;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		/* The rx release ring gets no tx near-full mask; all other
		 * WBM2SW rings are tx completion rings.
		 */
		if (ring_num != wbm2_sw_rx_rel_ring_id) {
			nf_irq_mask = &soc->wlan_cfg_ctx->
					int_tx_ring_near_full_irq_mask[0];
		}
		break;
	case REO_DST:
		/* Each REO dest ring must belong to exactly one of the two
		 * near-full groups.
		 */
		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
		else
			qdf_assert(0);
		break;
	default:
		break;
	}

	return nf_irq_mask;
}
399 
/**
 * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
 * @soc: Datapath SoC handle
 * @ring_params: srng params handle
 * @msi2_addr: MSI2 addr to be set for the SRNG
 * @msi2_data: MSI2 data to be set for the SRNG
 *
 * Plain setter; callers pass 0/0 to clear the MSI2 configuration.
 *
 * Return: None
 */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	ring_params->msi2_addr = msi2_addr;
	ring_params->msi2_data = msi2_data;
}
418 
/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number
 *
 * Return: None
 */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	/* Negative group number: this ring's near-full IRQ is not part of
	 * any ext_group, so leave MSI2 unconfigured.
	 */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	/* MSI data wraps within the allotted vector range */
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
467 
468 /* Percentage of ring entries considered as nearly full */
469 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
470 /* Percentage of ring entries considered as critically full */
471 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
472 /* Percentage of ring entries considered as safe threshold */
473 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
474 
475 /**
476  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
477  *			near full irq
478  * @soc: Datapath SoC handle
479  * @ring_params: ring params for SRNG
480  * @ring_type: ring type
481  */
482 static inline void
483 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
484 					  struct hal_srng_params *ring_params,
485 					  int ring_type)
486 {
487 	if (ring_params->nf_irq_support) {
488 		ring_params->high_thresh = (ring_params->num_entries *
489 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
490 		ring_params->crit_thresh = (ring_params->num_entries *
491 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
492 		ring_params->safe_thresh = (ring_params->num_entries *
493 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
494 	}
495 }
496 
/**
 * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
 *			structure from the ring params
 * @soc: Datapath SoC handle (unused here)
 * @srng: SRNG handle
 * @ring_params: ring params for a SRNG
 *
 * Only the crit/safe thresholds are cached on the srng object.
 *
 * Return: None
 */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	srng->crit_thresh = ring_params->crit_thresh;
	srng->safe_thresh = ring_params->safe_thresh;
}
513 
#else
/*
 * WLAN_FEATURE_NEAR_FULL_IRQ disabled: no-op stubs so callers can invoke
 * the near-full IRQ helpers unconditionally.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif
551 
/**
 * dp_srng_calculate_msi_group() - map a ring to its regular (and optional
 *			near-full) interrupt ext_group
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number within @ring_type
 * @reg_msi_grp_num: [out] ext_group index for the regular interrupt;
 *		     negative if no group claims the ring
 * @nf_irq_support: true if HW supports near-full IRQ for this ring
 * @nf_msi_grp_num: [out] ext_group index for the near-full interrupt;
 *		    written only when near-full IRQ is supported and a
 *		    near-full mask exists for the ring
 *
 * Return: QDF_STATUS_SUCCESS when a group mask exists for the ring type,
 *	   -QDF_STATUS_E_NOENT for ring types that raise no host interrupt.
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
			ring_num = 0;
		}  else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case REO2PPE:
		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
	break;

	case PPE2TCL:
		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
681 
/**
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available, 0 in case of integrated target or
 *	   DP_INTR_INTEGRATED mode, -EINVAL on lookup failure or an
 *	   unrecognized interrupt mode
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WLAN_MAX_PDEVS == 1: integrated target, no DP MSIs */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif
721 
#if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
/**
 * dp_ipa_vlan_srng_msi_setup() - strip MSI configuration from the IPA
 * alternate RX REO destination ring when IPA VLAN support is enabled
 * @ring_params: srng params being configured
 * @ring_type: SRNG ring type
 * @ring_num: ring number within the ring type
 */
static void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (ring_type != REO_DST || ring_num != IPA_ALT_REO_DEST_RING_IDX)
		return;

	/* This ring is serviced by IPA; the host must not take MSI for it */
	ring_params->msi_addr = 0;
	ring_params->msi_data = 0;
	ring_params->flags &= ~HAL_SRNG_MSI_INTR;
}
#else
static inline void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
}
#endif
743 
/**
 * dp_srng_msi_setup() - configure MSI (and near-full MSI2) for an SRNG
 * @soc: Datapath SoC handle
 * @srng: SRNG being configured
 * @ring_params: srng params to fill with MSI address/data
 * @ring_type: SRNG ring type
 * @ring_num: ring number within @ring_type
 *
 * Resolves the ring's interrupt ext_group, programs msi_addr/msi_data in
 * @ring_params, optionally registers PPE-DS interrupts for the ring, and
 * finally configures the near-full MSI2 vector when supported.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
			      struct hal_srng_params *ring_params,
			      int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;
	int vector;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type raises no host interrupt: clear both MSI and MSI2 */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* No ext_group claims this ring: leave MSI unset but still allow
	 * MSI2 configuration below.
	 */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);

	/*
	 * During umac reset ppeds interrupts free is not called.
	 * Avoid registering interrupts again.
	 *
	 */
	if (dp_check_umac_reset_in_progress(soc))
		goto configure_msi2;

	if (soc->arch_ops.dp_register_ppeds_interrupts)
		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
							       vector,
							       ring_type,
							       ring_num))
			return;

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
838 
839 /**
840  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
841  * update threshold value from wlan_cfg_ctx
842  * @soc: device handle
843  * @ring_params: per ring specific parameters
844  * @ring_type: Ring type
845  * @ring_num: Ring number for a given ring type
846  * @num_entries: number of entries to fill
847  *
848  * Fill the ring params with the pointer update threshold
849  * configuration parameters available in wlan_cfg_ctx
850  *
851  * Return: None
852  */
853 static void
854 dp_srng_configure_pointer_update_thresholds(
855 				struct dp_soc *soc,
856 				struct hal_srng_params *ring_params,
857 				int ring_type, int ring_num,
858 				int num_entries)
859 {
860 	if (ring_type == REO_DST) {
861 		ring_params->pointer_timer_threshold =
862 			wlan_cfg_get_pointer_timer_threshold_rx(
863 						soc->wlan_cfg_ctx);
864 		ring_params->pointer_num_threshold =
865 			wlan_cfg_get_pointer_num_threshold_rx(
866 						soc->wlan_cfg_ctx);
867 	}
868 }
869 
#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries to fill
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
		/* Rx release ring uses the generic "other" thresholds */
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		/* All remaining rings: per-ring-type srng cfg table */
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;

	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
}
920 #else
921 static void
922 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
923 				       struct hal_srng_params *ring_params,
924 				       int ring_type, int ring_num,
925 				       int num_entries)
926 {
927 	uint8_t wbm2_sw_rx_rel_ring_id;
928 	bool rx_refill_lt_disable;
929 
930 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
931 
932 	if (ring_type == REO_DST || ring_type == REO2PPE) {
933 		ring_params->intr_timer_thres_us =
934 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
935 		ring_params->intr_batch_cntr_thres_entries =
936 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
937 	} else if (ring_type == WBM2SW_RELEASE &&
938 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
939 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
940 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
941 		ring_params->intr_timer_thres_us =
942 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
943 		ring_params->intr_batch_cntr_thres_entries =
944 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
945 	} else if (ring_type == RXDMA_BUF) {
946 		rx_refill_lt_disable =
947 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
948 							(soc->wlan_cfg_ctx);
949 		ring_params->intr_timer_thres_us =
950 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
951 
952 		if (!rx_refill_lt_disable) {
953 			ring_params->low_threshold = num_entries >> 3;
954 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
955 			ring_params->intr_batch_cntr_thres_entries = 0;
956 		}
957 	} else {
958 		ring_params->intr_timer_thres_us =
959 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
960 		ring_params->intr_batch_cntr_thres_entries =
961 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
962 	}
963 
964 	/* These rings donot require interrupt to host. Make them zero */
965 	switch (ring_type) {
966 	case REO_REINJECT:
967 	case REO_CMD:
968 	case TCL_DATA:
969 	case TCL_CMD_CREDIT:
970 	case TCL_STATUS:
971 	case WBM_IDLE_LINK:
972 	case SW2WBM_RELEASE:
973 	case SW2RXDMA_NEW:
974 		ring_params->intr_timer_thres_us = 0;
975 		ring_params->intr_batch_cntr_thres_entries = 0;
976 		break;
977 	case PPE2TCL:
978 		ring_params->intr_timer_thres_us =
979 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
980 		ring_params->intr_batch_cntr_thres_entries =
981 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
982 		break;
983 	}
984 
985 	/* Enable low threshold interrupts for rx buffer rings (regular and
986 	 * monitor buffer rings.
987 	 * TODO: See if this is required for any other ring
988 	 */
989 	if ((ring_type == RXDMA_MONITOR_BUF) ||
990 	    (ring_type == RXDMA_MONITOR_STATUS ||
991 	    (ring_type == TX_MONITOR_BUF))) {
992 		/* TODO: Setting low threshold to 1/8th of ring size
993 		 * see if this needs to be configurable
994 		 */
995 		ring_params->low_threshold = num_entries >> 3;
996 		ring_params->intr_timer_thres_us =
997 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
998 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
999 		ring_params->intr_batch_cntr_thres_entries = 0;
1000 	}
1001 
1002 	/* During initialisation monitor rings are only filled with
1003 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1004 	 * a value less than that. Low threshold value is reconfigured again
1005 	 * to 1/8th of the ring size when monitor vap is created.
1006 	 */
1007 	if (ring_type == RXDMA_MONITOR_BUF)
1008 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1009 
	/* In case of PCI chipsets, we don't have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that an interrupt is received for
	 * every 4 entries in the MONITOR_STATUS ring
	 */
1015 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1016 	    (soc->intr_mode == DP_INTR_MSI))
1017 		ring_params->intr_batch_cntr_thres_entries = 4;
1018 }
1019 #endif
1020 
1021 #ifdef DISABLE_MON_RING_MSI_CFG
1022 /**
1023  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
1024  * @soc: DP SoC context
1025  * @ring_type: sring type
1026  *
1027  * Return: True if msi cfg should be skipped for srng type else false
1028  */
1029 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1030 {
1031 	if (ring_type == RXDMA_MONITOR_STATUS)
1032 		return true;
1033 
1034 	return false;
1035 }
1036 #else
1037 #ifdef DP_CON_MON_MSI_ENABLED
1038 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1039 {
1040 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1041 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
1042 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
1043 			return true;
1044 	} else if (ring_type == RXDMA_MONITOR_STATUS &&
1045 		  !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx)) {
1046 		return true;
1047 	}
1048 
1049 	return false;
1050 }
1051 #else
/* Default stub: MSI configuration is never skipped in this build */
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	return false;
}
1056 #endif /* DP_CON_MON_MSI_ENABLED */
1057 #endif /* DISABLE_MON_RING_MSI_CFG */
1058 
1059 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
1060 			    int ring_type, int ring_num, int mac_id,
1061 			    uint32_t idx)
1062 {
1063 	bool idle_check;
1064 
1065 	hal_soc_handle_t hal_soc = soc->hal_soc;
1066 	struct hal_srng_params ring_params;
1067 
1068 	if (srng->hal_srng) {
1069 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
1070 			    soc, ring_type, ring_num);
1071 		return QDF_STATUS_SUCCESS;
1072 	}
1073 
1074 	/* memset the srng ring to zero */
1075 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1076 
1077 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1078 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1079 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1080 
1081 	ring_params.num_entries = srng->num_entries;
1082 
1083 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1084 		ring_type, ring_num,
1085 		(void *)ring_params.ring_base_vaddr,
1086 		(void *)ring_params.ring_base_paddr,
1087 		ring_params.num_entries);
1088 
1089 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
1090 		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
1091 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1092 				 ring_type, ring_num);
1093 	} else {
1094 		ring_params.msi_data = 0;
1095 		ring_params.msi_addr = 0;
1096 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
1097 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1098 				 ring_type, ring_num);
1099 	}
1100 
1101 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1102 					       ring_type, ring_num,
1103 					       srng->num_entries);
1104 
1105 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
1106 	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
1107 						    ring_type, ring_num,
1108 						    srng->num_entries);
1109 
1110 	if (srng->cached)
1111 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1112 
1113 	idle_check = dp_check_umac_reset_in_progress(soc);
1114 
1115 	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
1116 					    mac_id, &ring_params, idle_check,
1117 					    idx);
1118 
1119 	if (!srng->hal_srng) {
1120 		dp_srng_free(soc, srng);
1121 		return QDF_STATUS_E_FAILURE;
1122 	}
1123 
1124 	return QDF_STATUS_SUCCESS;
1125 }
1126 
1127 qdf_export_symbol(dp_srng_init_idx);
1128 
1129 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
1130 				     struct dp_intr *int_ctx,
1131 				     int mac_for_pdev,
1132 				     int total_budget)
1133 {
1134 	uint32_t target_type;
1135 
1136 	target_type = hal_get_target_type(soc->hal_soc);
1137 	if (target_type == TARGET_TYPE_QCN9160)
1138 		return dp_monitor_process(soc, int_ctx,
1139 					  mac_for_pdev, total_budget);
1140 	else
1141 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
1142 					    total_budget);
1143 }
1144 
1145 /**
1146  * dp_process_lmac_rings() - Process LMAC rings
1147  * @int_ctx: interrupt context
1148  * @total_budget: budget of work which can be done
1149  *
1150  * Return: work done
1151  */
1152 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1153 {
1154 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1155 	struct dp_soc *soc = int_ctx->soc;
1156 	uint32_t remaining_quota = total_budget;
1157 	struct dp_pdev *pdev = NULL;
1158 	uint32_t work_done  = 0;
1159 	int budget = total_budget;
1160 	int ring = 0;
1161 	bool rx_refill_lt_disable;
1162 
1163 	rx_refill_lt_disable =
1164 		wlan_cfg_get_dp_soc_rxdma_refill_lt_disable(soc->wlan_cfg_ctx);
1165 
1166 	/* Process LMAC interrupts */
1167 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1168 		int mac_for_pdev = ring;
1169 
1170 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1171 		if (!pdev)
1172 			continue;
1173 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1174 			work_done = dp_monitor_process(soc, int_ctx,
1175 						       mac_for_pdev,
1176 						       remaining_quota);
1177 			if (work_done)
1178 				intr_stats->num_rx_mon_ring_masks++;
1179 			budget -= work_done;
1180 			if (budget <= 0)
1181 				goto budget_done;
1182 			remaining_quota = budget;
1183 		}
1184 
1185 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
1186 			work_done = dp_tx_mon_process(soc, int_ctx,
1187 						      mac_for_pdev,
1188 						      remaining_quota);
1189 			if (work_done)
1190 				intr_stats->num_tx_mon_ring_masks++;
1191 			budget -= work_done;
1192 			if (budget <= 0)
1193 				goto budget_done;
1194 			remaining_quota = budget;
1195 		}
1196 
1197 		if (int_ctx->rxdma2host_ring_mask &
1198 				(1 << mac_for_pdev)) {
1199 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
1200 							      mac_for_pdev,
1201 							      remaining_quota);
1202 			if (work_done)
1203 				intr_stats->num_rxdma2host_ring_masks++;
1204 			budget -=  work_done;
1205 			if (budget <= 0)
1206 				goto budget_done;
1207 			remaining_quota = budget;
1208 		}
1209 
1210 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
1211 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1212 			union dp_rx_desc_list_elem_t *tail = NULL;
1213 			struct dp_srng *rx_refill_buf_ring;
1214 			struct rx_desc_pool *rx_desc_pool;
1215 
1216 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
1217 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1218 				rx_refill_buf_ring =
1219 					&soc->rx_refill_buf_ring[mac_for_pdev];
1220 			else
1221 				rx_refill_buf_ring =
1222 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1223 
1224 			intr_stats->num_host2rxdma_ring_masks++;
1225 
1226 			if (!rx_refill_lt_disable)
1227 				dp_rx_buffers_lt_replenish_simple(soc,
1228 							  mac_for_pdev,
1229 							  rx_refill_buf_ring,
1230 							  rx_desc_pool,
1231 							  0,
1232 							  &desc_list,
1233 							  &tail);
1234 		}
1235 	}
1236 
1237 	if (int_ctx->host2rxdma_mon_ring_mask)
1238 		dp_rx_mon_buf_refill(int_ctx);
1239 
1240 	if (int_ctx->host2txmon_ring_mask)
1241 		dp_tx_mon_buf_refill(int_ctx);
1242 
1243 budget_done:
1244 	return total_budget - budget;
1245 }
1246 
1247 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1248 /**
1249  * dp_service_near_full_srngs() - Bottom half handler to process the near
1250  *				full IRQ on a SRNG
1251  * @dp_ctx: Datapath SoC handle
1252  * @dp_budget: Number of SRNGs which can be processed in a single attempt
1253  *		without rescheduling
1254  * @cpu: cpu id
1255  *
1256  * Return: remaining budget/quota for the soc device
1257  */
1258 static
1259 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1260 {
1261 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1262 	struct dp_soc *soc = int_ctx->soc;
1263 
1264 	/*
1265 	 * dp_service_near_full_srngs arch ops should be initialized always
1266 	 * if the NEAR FULL IRQ feature is enabled.
1267 	 */
1268 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
1269 							dp_budget);
1270 }
1271 #endif
1272 
1273 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1274 
/**
 * dp_service_srngs() - Top level bottom-half handler for DP ring interrupts
 * @dp_ctx: interrupt context (struct dp_intr) for this group
 * @dp_budget: total work quota that may be consumed in this invocation
 * @cpu: id of the CPU executing the handler
 *
 * Services, in priority order: Tx completion rings, REO exception ring,
 * Rx WBM release ring, REO destination (Rx) rings, REO status ring and
 * finally the LMAC rings, decrementing the budget after each stage and
 * bailing out as soon as it is exhausted.
 *
 * Return: amount of budget consumed (dp_budget - remaining budget)
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	/* Mark this CPU as actively servicing DP rings; cleared on exit */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the monitor vdev timer is
	 * not running; otherwise the timer context reaps them.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	/* Notify FW, if a callback is registered, that servicing finished */
	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
1413 
1414 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1415 
1416 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1417 {
1418 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1419 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1420 	struct dp_soc *soc = int_ctx->soc;
1421 	uint32_t remaining_quota = dp_budget;
1422 	uint32_t work_done  = 0;
1423 	int budget = dp_budget;
1424 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1425 
1426 	if (reo_status_mask) {
1427 		if (dp_reo_status_ring_handler(int_ctx, soc))
1428 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1429 	}
1430 
1431 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
1432 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1433 		if (work_done) {
1434 			budget -=  work_done;
1435 			if (budget <= 0)
1436 				goto budget_done;
1437 			remaining_quota = budget;
1438 		}
1439 	}
1440 
1441 	qdf_lro_flush(int_ctx->lro_ctx);
1442 	intr_stats->num_masks++;
1443 
1444 budget_done:
1445 	return dp_budget - budget;
1446 }
1447 
1448 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1449 
1450 QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1451 {
1452 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1453 	int i;
1454 	int lmac_id = 0;
1455 
1456 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1457 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
1458 	soc->intr_mode = DP_INTR_POLL;
1459 
1460 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1461 		soc->intr_ctx[i].dp_intr_id = i;
1462 		soc->intr_ctx[i].tx_ring_mask =
1463 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1464 		soc->intr_ctx[i].rx_ring_mask =
1465 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1466 		soc->intr_ctx[i].rx_mon_ring_mask =
1467 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1468 		soc->intr_ctx[i].rx_err_ring_mask =
1469 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1470 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1471 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1472 		soc->intr_ctx[i].reo_status_ring_mask =
1473 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1474 		soc->intr_ctx[i].rxdma2host_ring_mask =
1475 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1476 		soc->intr_ctx[i].soc = soc;
1477 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1478 
1479 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
1480 			hif_event_history_init(soc->hif_handle, i);
1481 			soc->mon_intr_id_lmac_map[lmac_id] = i;
1482 			lmac_id++;
1483 		}
1484 	}
1485 
1486 	qdf_timer_init(soc->osdev, &soc->int_timer,
1487 		       dp_interrupt_timer, (void *)soc,
1488 		       QDF_TIMER_TYPE_WAKE_APPS);
1489 
1490 	return QDF_STATUS_SUCCESS;
1491 }
1492 
1493 void dp_soc_set_interrupt_mode(struct dp_soc *soc)
1494 {
1495 	uint32_t msi_base_data, msi_vector_start;
1496 	int msi_vector_count, ret;
1497 
1498 	soc->intr_mode = DP_INTR_INTEGRATED;
1499 
1500 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1501 	    (dp_is_monitor_mode_using_poll(soc) &&
1502 	     soc->cdp_soc.ol_ops->get_con_mode &&
1503 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
1504 		soc->intr_mode = DP_INTR_POLL;
1505 	} else {
1506 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1507 						  &msi_vector_count,
1508 						  &msi_base_data,
1509 						  &msi_vector_start);
1510 		if (ret)
1511 			return;
1512 
1513 		soc->intr_mode = DP_INTR_MSI;
1514 	}
1515 }
1516 
1517 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
1518 /**
1519  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
1520  * Calculate interrupt map for legacy interrupts
1521  * @soc: DP soc handle
1522  * @intr_ctx_num: Interrupt context number
1523  * @irq_id_map: IRQ map
1524  * @num_irq_r: Number of interrupts assigned for this context
1525  *
1526  * Return: void
1527  */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	/* Legacy virtual IRQ mode: translate every enabled ring-mask bit
	 * into its dedicated legacy interrupt line.
	 */
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
		/* NOTE(review): unlike the other rings there is no "- j"
		 * offset here — presumably a single IRQ line; confirm.
		 */
		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = sw2txmon_src_ring;
		if (txmon2host_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (txmon2sw_p0_dest0 - j);
	}
	*num_irq_r = num_irq;
}
1584 #else
/* Stub when QCA_SUPPORT_LEGACY_INTERRUPTS is not defined: legacy PCI
 * interrupts are unavailable, so no IRQ map is produced.
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
1591 #endif
1592 
/* Calculate the IRQ map for one interrupt context when the target uses
 * integrated (AHB) interrupts: every enabled bit in each ring mask is
 * translated into the corresponding hardware IRQ line.
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Rx monitor uses three IRQ lines per mask bit: ppdu end,
		 * monitor status and monitor destination.
		 */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = host2tx_monitor_ring1;

		if (txmon2host_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(txmon2host_monitor_destination_mac1 - j);
		}

	}
	*num_irq_r = num_irq;
}
1680 
/* Calculate the IRQ map for one interrupt context in MSI mode: all rings
 * grouped in a context share a single MSI vector, chosen round-robin from
 * the vectors assigned to the "DP" user.
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	/* Round-robin this context onto one of the assigned MSI vectors */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* A context with at least one enabled ring gets exactly one IRQ;
	 * a context with no rings gets none.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
1734 
1735 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1736 				    int *irq_id_map, int *num_irq)
1737 {
1738 	int msi_vector_count, ret;
1739 	uint32_t msi_base_data, msi_vector_start;
1740 
1741 	if (pld_get_enable_intx(soc->osdev->dev)) {
1742 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
1743 				intr_ctx_num, irq_id_map, num_irq);
1744 	}
1745 
1746 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1747 					  &msi_vector_count,
1748 					  &msi_base_data,
1749 					  &msi_vector_start);
1750 	if (ret)
1751 		return dp_soc_interrupt_map_calculate_integrated(soc,
1752 				intr_ctx_num, irq_id_map, num_irq);
1753 
1754 	else
1755 		dp_soc_interrupt_map_calculate_msi(soc,
1756 				intr_ctx_num, irq_id_map, num_irq,
1757 				msi_vector_count, msi_vector_start);
1758 }
1759 
1760 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1761 /**
1762  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
1763  * @soc: DP soc handle
1764  * @num_irq: IRQ number
1765  * @irq_id_map: IRQ map
1766  * @intr_id: interrupt context ID
1767  *
1768  * Return: 0 for success. nonzero for failure.
1769  */
1770 static inline int
1771 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
1772 				  int irq_id_map[], int intr_id)
1773 {
1774 	return hif_register_ext_group(soc->hif_handle,
1775 				      num_irq, irq_id_map,
1776 				      dp_service_near_full_srngs,
1777 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
1778 				      HIF_EXEC_NAPI_TYPE,
1779 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1780 }
1781 #else
/* Near-full IRQ feature disabled: nothing to attach, report success */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
1788 #endif
1789 
1790 #ifdef DP_CON_MON_MSI_SKIP_SET
1791 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
1792 {
1793 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
1794 		 QDF_GLOBAL_MONITOR_MODE &&
1795 		 !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx));
1796 }
1797 #else
/* DP_CON_MON_MSI_SKIP_SET not defined: never skip the rx mon ring mask */
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	return false;
}
1802 #endif
1803 
1804 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
1805 {
1806 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1807 	int i;
1808 
1809 	if (soc->intr_mode == DP_INTR_POLL) {
1810 		qdf_timer_free(&soc->int_timer);
1811 	} else {
1812 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
1813 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1814 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
1815 	}
1816 
1817 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1818 		soc->intr_ctx[i].tx_ring_mask = 0;
1819 		soc->intr_ctx[i].rx_ring_mask = 0;
1820 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1821 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1822 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1823 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1824 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1825 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1826 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1827 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
1828 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
1829 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
1830 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
1831 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
1832 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
1833 
1834 		hif_event_history_deinit(soc->hif_handle, i);
1835 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1836 	}
1837 
1838 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1839 		    sizeof(soc->mon_intr_id_lmac_map),
1840 		    DP_MON_INVALID_LMAC_ID);
1841 }
1842 
1843 QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
1844 {
1845 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1846 
1847 	int i = 0;
1848 	int num_irq = 0;
1849 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
1850 	int lmac_id = 0;
1851 	int napi_scale;
1852 
1853 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1854 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
1855 
1856 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1857 		int ret = 0;
1858 
1859 		/* Map of IRQ ids registered with one interrupt context */
1860 		int irq_id_map[HIF_MAX_GRP_IRQ];
1861 
1862 		int tx_mask =
1863 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1864 		int rx_mask =
1865 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1866 		int rx_mon_mask =
1867 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1868 		int tx_mon_ring_mask =
1869 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1870 		int rx_err_ring_mask =
1871 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1872 		int rx_wbm_rel_ring_mask =
1873 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1874 		int reo_status_ring_mask =
1875 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1876 		int rxdma2host_ring_mask =
1877 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1878 		int host2rxdma_ring_mask =
1879 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1880 		int host2rxdma_mon_ring_mask =
1881 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1882 				soc->wlan_cfg_ctx, i);
1883 		int rx_near_full_grp_1_mask =
1884 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
1885 							     i);
1886 		int rx_near_full_grp_2_mask =
1887 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
1888 							     i);
1889 		int tx_ring_near_full_mask =
1890 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
1891 							    i);
1892 		int host2txmon_ring_mask =
1893 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
1894 		int umac_reset_intr_mask =
1895 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
1896 
1897 		if (dp_skip_rx_mon_ring_mask_set(soc))
1898 			rx_mon_mask = 0;
1899 
1900 		soc->intr_ctx[i].dp_intr_id = i;
1901 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1902 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1903 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1904 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1905 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1906 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1907 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1908 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1909 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1910 			 host2rxdma_mon_ring_mask;
1911 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
1912 						rx_near_full_grp_1_mask;
1913 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
1914 						rx_near_full_grp_2_mask;
1915 		soc->intr_ctx[i].tx_ring_near_full_mask =
1916 						tx_ring_near_full_mask;
1917 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
1918 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
1919 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
1920 
1921 		soc->intr_ctx[i].soc = soc;
1922 
1923 		num_irq = 0;
1924 
1925 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1926 					       &num_irq);
1927 
1928 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
1929 		    tx_ring_near_full_mask) {
1930 			dp_soc_near_full_interrupt_attach(soc, num_irq,
1931 							  irq_id_map, i);
1932 		} else {
1933 			napi_scale = wlan_cfg_get_napi_scale_factor(
1934 							    soc->wlan_cfg_ctx);
1935 			if (!napi_scale)
1936 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
1937 
1938 			ret = hif_register_ext_group(soc->hif_handle,
1939 				num_irq, irq_id_map, dp_service_srngs,
1940 				&soc->intr_ctx[i], "dp_intr",
1941 				HIF_EXEC_NAPI_TYPE, napi_scale);
1942 		}
1943 
1944 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
1945 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
1946 
1947 		if (ret) {
1948 			dp_init_err("%pK: failed, ret = %d", soc, ret);
1949 			dp_soc_interrupt_detach(txrx_soc);
1950 			return QDF_STATUS_E_FAILURE;
1951 		}
1952 
1953 		hif_event_history_init(soc->hif_handle, i);
1954 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1955 
1956 		if (rx_err_ring_mask)
1957 			rx_err_ring_intr_ctxt_id = i;
1958 
1959 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
1960 			soc->mon_intr_id_lmac_map[lmac_id] = i;
1961 			lmac_id++;
1962 		}
1963 	}
1964 
1965 	hif_configure_ext_group_interrupts(soc->hif_handle);
1966 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
1967 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
1968 						  rx_err_ring_intr_ctxt_id, 0);
1969 
1970 	return QDF_STATUS_SUCCESS;
1971 }
1972 
1973 #define AVG_MAX_MPDUS_PER_TID 128
1974 #define AVG_TIDS_PER_CLIENT 2
1975 #define AVG_FLOWS_PER_TID 2
1976 #define AVG_MSDUS_PER_FLOW 128
1977 #define AVG_MSDUS_PER_MPDU 4
1978 
1979 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
1980 {
1981 	struct qdf_mem_multi_page_t *pages;
1982 
1983 	if (mac_id != WLAN_INVALID_PDEV_ID) {
1984 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1985 	} else {
1986 		pages = &soc->link_desc_pages;
1987 	}
1988 
1989 	if (!pages) {
1990 		dp_err("can not get link desc pages");
1991 		QDF_ASSERT(0);
1992 		return;
1993 	}
1994 
1995 	if (pages->dma_pages) {
1996 		wlan_minidump_remove((void *)
1997 				     pages->dma_pages->page_v_addr_start,
1998 				     pages->num_pages * pages->page_size,
1999 				     soc->ctrl_psoc,
2000 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2001 				     "hw_link_desc_bank");
2002 		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
2003 					     pages, 0, false);
2004 	}
2005 }
2006 
2007 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
2008 
/**
 * dp_hw_link_desc_pool_banks_alloc() - allocate the HW link descriptor banks
 * @soc: DP SOC handle
 * @mac_id: WLAN_INVALID_PDEV_ID for the common SOC pool, otherwise the
 *	    monitor-mode mac whose descriptor pool should be allocated
 *
 * Sizes the pool (monitor: from the RXDMA monitor desc ring; common: from
 * per-client MPDU/MSDU averages), rounds the count up to a power of two and
 * allocates it as a multi-page block. Returns early if already allocated.
 *
 * Return: QDF_STATUS_SUCCESS on success (or if banks already exist),
 *	   QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		/* pool size follows the monitor desc ring's entry count */
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		/* NOTE(review): Rx divides by the literal 6 while Tx uses
		 * num_msdus_per_link_desc above — presumably a hard-coded
		 * Rx MSDUs-per-link-desc value; confirm against HAL.
		 */
		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);
	total_mem_size =  *total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	/* NOTE(review): minidump_str is populated above but the literal
	 * "hw_link_desc_bank" is logged here for both pool kinds — confirm
	 * whether minidump_str was meant to be passed instead.
	 */
	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}
2109 
2110 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2111 {
2112 	uint32_t i;
2113 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2114 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2115 	qdf_dma_addr_t paddr;
2116 
2117 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2118 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2119 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2120 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2121 			if (vaddr) {
2122 				qdf_mem_free_consistent(soc->osdev,
2123 							soc->osdev->dev,
2124 							size,
2125 							vaddr,
2126 							paddr,
2127 							0);
2128 				vaddr = NULL;
2129 			}
2130 		}
2131 	} else {
2132 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2133 				     soc->wbm_idle_link_ring.alloc_size,
2134 				     soc->ctrl_psoc,
2135 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2136 				     "wbm_idle_link_ring");
2137 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2138 	}
2139 }
2140 
2141 QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
2142 {
2143 	uint32_t entry_size, i;
2144 	uint32_t total_mem_size;
2145 	qdf_dma_addr_t *baseaddr = NULL;
2146 	struct dp_srng *dp_srng;
2147 	uint32_t ring_type;
2148 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2149 	uint32_t tlds;
2150 
2151 	ring_type = WBM_IDLE_LINK;
2152 	dp_srng = &soc->wbm_idle_link_ring;
2153 	tlds = soc->total_link_descs;
2154 
2155 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
2156 	total_mem_size = entry_size * tlds;
2157 
2158 	if (total_mem_size <= max_alloc_size) {
2159 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
2160 			dp_init_err("%pK: Link desc idle ring setup failed",
2161 				    soc);
2162 			goto fail;
2163 		}
2164 
2165 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2166 				  soc->wbm_idle_link_ring.alloc_size,
2167 				  soc->ctrl_psoc,
2168 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2169 				  "wbm_idle_link_ring");
2170 	} else {
2171 		uint32_t num_scatter_bufs;
2172 		uint32_t buf_size = 0;
2173 
2174 		soc->wbm_idle_scatter_buf_size =
2175 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2176 		hal_idle_scatter_buf_num_entries(
2177 					soc->hal_soc,
2178 					soc->wbm_idle_scatter_buf_size);
2179 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2180 					soc->hal_soc, total_mem_size,
2181 					soc->wbm_idle_scatter_buf_size);
2182 
2183 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2184 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2185 				  FL("scatter bufs size out of bounds"));
2186 			goto fail;
2187 		}
2188 
2189 		for (i = 0; i < num_scatter_bufs; i++) {
2190 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2191 			buf_size = soc->wbm_idle_scatter_buf_size;
2192 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
2193 				qdf_mem_alloc_consistent(soc->osdev,
2194 							 soc->osdev->dev,
2195 							 buf_size,
2196 							 baseaddr);
2197 
2198 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2199 				QDF_TRACE(QDF_MODULE_ID_DP,
2200 					  QDF_TRACE_LEVEL_ERROR,
2201 					  FL("Scatter lst memory alloc fail"));
2202 				goto fail;
2203 			}
2204 		}
2205 		soc->num_scatter_bufs = num_scatter_bufs;
2206 	}
2207 	return QDF_STATUS_SUCCESS;
2208 
2209 fail:
2210 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2211 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2212 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2213 
2214 		if (vaddr) {
2215 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2216 						soc->wbm_idle_scatter_buf_size,
2217 						vaddr,
2218 						paddr, 0);
2219 			vaddr = NULL;
2220 		}
2221 	}
2222 	return QDF_STATUS_E_NOMEM;
2223 }
2224 
2225 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
2226 
2227 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2228 {
2229 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2230 
2231 	if (dp_srng->base_vaddr_unaligned) {
2232 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2233 			return QDF_STATUS_E_FAILURE;
2234 	}
2235 	return QDF_STATUS_SUCCESS;
2236 }
2237 
/**
 * dp_hw_link_desc_ring_deinit() - de-initialize the WBM idle link SRNG
 * @soc: DP SOC handle
 *
 * Counterpart of dp_hw_link_desc_ring_init(); only the SRNG form of the
 * idle link list is torn down here.
 */
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
2242 
#ifdef IPA_OFFLOAD
/* REO dest rings reserved for IPA Rx: one normally, two when the
 * alternate (vlan) Rx ring is also owned by IPA.
 */
#define USE_1_IPA_RX_REO_RING 1
#define USE_2_IPA_RX_REO_RINGS 2
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation builds use minimal REO destination ring sizes */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */

#ifdef IPA_WDI3_TX_TWO_PIPES
#ifdef DP_MEMORY_OPT
/* Two Tx pipes with DP_MEMORY_OPT: the alternate Tx ring pair
 * (TCL data + WBM completion at IPA_TX_ALT_RING_IDX) is managed
 * through the dedicated helpers below.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

#else /* !DP_MEMORY_OPT */
/* Without DP_MEMORY_OPT these helpers are stubs (alt ring lifecycle is
 * presumably handled with the regular rings — confirm at call sites).
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
#endif /* DP_MEMORY_OPT */

/* Program the HAL Tx data path for the IPA alternate Tx ring */
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}

#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single Tx pipe: no alternate Tx ring exists, all helpers are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_WDI3_TX_TWO_PIPES */

#else

#define REO_DST_RING_SIZE_QCA6290 1024

/* IPA offload disabled: alternate Tx ring helpers are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_OFFLOAD */
2355 
2356 /**
2357  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2358  * @soc: Datapath soc handler
2359  *
2360  * This api resets the default cpu ring map
2361  */
2362 void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2363 {
2364 	uint8_t i;
2365 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2366 
2367 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2368 		switch (nss_config) {
2369 		case dp_nss_cfg_first_radio:
2370 			/*
2371 			 * Setting Tx ring map for one nss offloaded radio
2372 			 */
2373 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2374 			break;
2375 
2376 		case dp_nss_cfg_second_radio:
2377 			/*
2378 			 * Setting Tx ring for two nss offloaded radios
2379 			 */
2380 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2381 			break;
2382 
2383 		case dp_nss_cfg_dbdc:
2384 			/*
2385 			 * Setting Tx ring map for 2 nss offloaded radios
2386 			 */
2387 			soc->tx_ring_map[i] =
2388 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2389 			break;
2390 
2391 		case dp_nss_cfg_dbtc:
2392 			/*
2393 			 * Setting Tx ring map for 3 nss offloaded radios
2394 			 */
2395 			soc->tx_ring_map[i] =
2396 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2397 			break;
2398 
2399 		default:
2400 			dp_err("tx_ring_map failed due to invalid nss cfg");
2401 			break;
2402 		}
2403 	}
2404 }
2405 
2406 /**
2407  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2408  *					  unused WMAC hw rings
2409  * @soc: DP Soc handle
2410  * @mac_num: wmac num
2411  *
2412  * Return: Return void
2413  */
2414 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2415 						int mac_num)
2416 {
2417 	uint8_t *grp_mask = NULL;
2418 	int group_number;
2419 
2420 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2421 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2422 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2423 					  group_number, 0x0);
2424 
2425 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2426 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2427 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2428 				      group_number, 0x0);
2429 
2430 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2431 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2432 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2433 					  group_number, 0x0);
2434 
2435 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2436 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2437 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2438 					      group_number, 0x0);
2439 }
2440 
2441 #ifdef IPA_OFFLOAD
2442 #ifdef IPA_WDI3_VLAN_SUPPORT
2443 /**
2444  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
2445  *                                     ring for vlan tagged traffic
2446  * @soc: DP Soc handle
2447  *
2448  * Return: Return void
2449  */
2450 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
2451 {
2452 	uint8_t *grp_mask = NULL;
2453 	int group_number, mask;
2454 
2455 	if (!wlan_ipa_is_vlan_enabled())
2456 		return;
2457 
2458 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2459 
2460 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
2461 	if (group_number < 0) {
2462 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2463 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
2464 		return;
2465 	}
2466 
2467 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2468 
2469 	/* reset the interrupt mask for offloaded ring */
2470 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
2471 
2472 	/*
2473 	 * set the interrupt mask to zero for rx offloaded radio.
2474 	 */
2475 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2476 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_WDI3_VLAN_SUPPORT */
#else
/* IPA_OFFLOAD disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_OFFLOAD */
2487 
2488 /**
2489  * dp_soc_reset_intr_mask() - reset interrupt mask
2490  * @soc: DP Soc handle
2491  *
2492  * Return: Return void
2493  */
2494 void dp_soc_reset_intr_mask(struct dp_soc *soc)
2495 {
2496 	uint8_t j;
2497 	uint8_t *grp_mask = NULL;
2498 	int group_number, mask, num_ring;
2499 
2500 	/* number of tx ring */
2501 	num_ring = soc->num_tcl_data_rings;
2502 
2503 	/*
2504 	 * group mask for tx completion  ring.
2505 	 */
2506 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2507 
2508 	/* loop and reset the mask for only offloaded ring */
2509 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
2510 		/*
2511 		 * Group number corresponding to tx offloaded ring.
2512 		 */
2513 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2514 		if (group_number < 0) {
2515 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2516 				      soc, WBM2SW_RELEASE, j);
2517 			continue;
2518 		}
2519 
2520 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2521 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
2522 		    (!mask)) {
2523 			continue;
2524 		}
2525 
2526 		/* reset the tx mask for offloaded ring */
2527 		mask &= (~(1 << j));
2528 
2529 		/*
2530 		 * reset the interrupt mask for offloaded ring.
2531 		 */
2532 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2533 	}
2534 
2535 	/* number of rx rings */
2536 	num_ring = soc->num_reo_dest_rings;
2537 
2538 	/*
2539 	 * group mask for reo destination ring.
2540 	 */
2541 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2542 
2543 	/* loop and reset the mask for only offloaded ring */
2544 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
2545 		/*
2546 		 * Group number corresponding to rx offloaded ring.
2547 		 */
2548 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2549 		if (group_number < 0) {
2550 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2551 				      soc, REO_DST, j);
2552 			continue;
2553 		}
2554 
2555 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2556 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
2557 		    (!mask)) {
2558 			continue;
2559 		}
2560 
2561 		/* reset the interrupt mask for offloaded ring */
2562 		mask &= (~(1 << j));
2563 
2564 		/*
2565 		 * set the interrupt mask to zero for rx offloaded radio.
2566 		 */
2567 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2568 	}
2569 
2570 	/*
2571 	 * group mask for Rx buffer refill ring
2572 	 */
2573 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2574 
2575 	/* loop and reset the mask for only offloaded ring */
2576 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2577 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
2578 
2579 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2580 			continue;
2581 		}
2582 
2583 		/*
2584 		 * Group number corresponding to rx offloaded ring.
2585 		 */
2586 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
2587 		if (group_number < 0) {
2588 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2589 				      soc, REO_DST, lmac_id);
2590 			continue;
2591 		}
2592 
2593 		/* set the interrupt mask for offloaded ring */
2594 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2595 							  group_number);
2596 		mask &= (~(1 << lmac_id));
2597 
2598 		/*
2599 		 * set the interrupt mask to zero for rx offloaded radio.
2600 		 */
2601 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2602 						  group_number, mask);
2603 	}
2604 
2605 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
2606 
2607 	for (j = 0; j < num_ring; j++) {
2608 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
2609 			continue;
2610 		}
2611 
2612 		/*
2613 		 * Group number corresponding to rx err ring.
2614 		 */
2615 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2616 		if (group_number < 0) {
2617 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2618 				      soc, REO_EXCEPTION, j);
2619 			continue;
2620 		}
2621 
2622 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
2623 					      group_number, 0);
2624 	}
2625 }
2626 
2627 #ifdef IPA_OFFLOAD
2628 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
2629 			 uint32_t *remap1, uint32_t *remap2)
2630 {
2631 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
2632 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
2633 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
2634 
2635 	switch (soc->arch_id) {
2636 	case CDP_ARCH_TYPE_BE:
2637 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2638 					      soc->num_reo_dest_rings -
2639 					      USE_2_IPA_RX_REO_RINGS, remap1,
2640 					      remap2);
2641 		break;
2642 
2643 	case CDP_ARCH_TYPE_LI:
2644 		if (wlan_ipa_is_vlan_enabled()) {
2645 			hal_compute_reo_remap_ix2_ix3(
2646 					soc->hal_soc, ring,
2647 					soc->num_reo_dest_rings -
2648 					USE_2_IPA_RX_REO_RINGS, remap1,
2649 					remap2);
2650 
2651 		} else {
2652 			hal_compute_reo_remap_ix2_ix3(
2653 					soc->hal_soc, ring,
2654 					soc->num_reo_dest_rings -
2655 					USE_1_IPA_RX_REO_RING, remap1,
2656 					remap2);
2657 		}
2658 
2659 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
2660 		break;
2661 	default:
2662 		dp_err("unknown arch_id 0x%x", soc->arch_id);
2663 		QDF_BUG(0);
2664 	}
2665 
2666 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2667 
2668 	return true;
2669 }
2670 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* True if @index is the IPA alternate Tx (TCL) ring index */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True if @index is the IPA alternate Tx completion ring index */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single Tx pipe: there is no alternate Tx ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

/* Single Tx pipe: there is no alternate Tx completion ring */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
2692 
2693 /**
2694  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
2695  *
2696  * @tx_ring_num: Tx ring number
2697  * @tx_ipa_ring_sz: Return param only updated for IPA.
2698  * @soc_cfg_ctx: dp soc cfg context
2699  *
2700  * Return: None
2701  */
2702 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
2703 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2704 {
2705 	if (!soc_cfg_ctx->ipa_enabled)
2706 		return;
2707 
2708 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
2709 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
2710 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
2711 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
2712 }
2713 
2714 /**
2715  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
2716  *
2717  * @tx_comp_ring_num: Tx comp ring number
2718  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
2719  * @soc_cfg_ctx: dp soc cfg context
2720  *
2721  * Return: None
2722  */
2723 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
2724 					 int *tx_comp_ipa_ring_sz,
2725 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2726 {
2727 	if (!soc_cfg_ctx->ipa_enabled)
2728 		return;
2729 
2730 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
2731 		*tx_comp_ipa_ring_sz =
2732 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
2733 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
2734 		*tx_comp_ipa_ring_sz =
2735 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
2736 }
2737 #else
2738 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
2739 {
2740 	uint8_t num = 0;
2741 
2742 	switch (value) {
2743 	/* should we have all the different possible ring configs */
2744 	case 0xFF:
2745 		num = 8;
2746 		ring[0] = REO_REMAP_SW1;
2747 		ring[1] = REO_REMAP_SW2;
2748 		ring[2] = REO_REMAP_SW3;
2749 		ring[3] = REO_REMAP_SW4;
2750 		ring[4] = REO_REMAP_SW5;
2751 		ring[5] = REO_REMAP_SW6;
2752 		ring[6] = REO_REMAP_SW7;
2753 		ring[7] = REO_REMAP_SW8;
2754 		break;
2755 
2756 	case 0x3F:
2757 		num = 6;
2758 		ring[0] = REO_REMAP_SW1;
2759 		ring[1] = REO_REMAP_SW2;
2760 		ring[2] = REO_REMAP_SW3;
2761 		ring[3] = REO_REMAP_SW4;
2762 		ring[4] = REO_REMAP_SW5;
2763 		ring[5] = REO_REMAP_SW6;
2764 		break;
2765 
2766 	case 0xF:
2767 		num = 4;
2768 		ring[0] = REO_REMAP_SW1;
2769 		ring[1] = REO_REMAP_SW2;
2770 		ring[2] = REO_REMAP_SW3;
2771 		ring[3] = REO_REMAP_SW4;
2772 		break;
2773 	case 0xE:
2774 		num = 3;
2775 		ring[0] = REO_REMAP_SW2;
2776 		ring[1] = REO_REMAP_SW3;
2777 		ring[2] = REO_REMAP_SW4;
2778 		break;
2779 	case 0xD:
2780 		num = 3;
2781 		ring[0] = REO_REMAP_SW1;
2782 		ring[1] = REO_REMAP_SW3;
2783 		ring[2] = REO_REMAP_SW4;
2784 		break;
2785 	case 0xC:
2786 		num = 2;
2787 		ring[0] = REO_REMAP_SW3;
2788 		ring[1] = REO_REMAP_SW4;
2789 		break;
2790 	case 0xB:
2791 		num = 3;
2792 		ring[0] = REO_REMAP_SW1;
2793 		ring[1] = REO_REMAP_SW2;
2794 		ring[2] = REO_REMAP_SW4;
2795 		break;
2796 	case 0xA:
2797 		num = 2;
2798 		ring[0] = REO_REMAP_SW2;
2799 		ring[1] = REO_REMAP_SW4;
2800 		break;
2801 	case 0x9:
2802 		num = 2;
2803 		ring[0] = REO_REMAP_SW1;
2804 		ring[1] = REO_REMAP_SW4;
2805 		break;
2806 	case 0x8:
2807 		num = 1;
2808 		ring[0] = REO_REMAP_SW4;
2809 		break;
2810 	case 0x7:
2811 		num = 3;
2812 		ring[0] = REO_REMAP_SW1;
2813 		ring[1] = REO_REMAP_SW2;
2814 		ring[2] = REO_REMAP_SW3;
2815 		break;
2816 	case 0x6:
2817 		num = 2;
2818 		ring[0] = REO_REMAP_SW2;
2819 		ring[1] = REO_REMAP_SW3;
2820 		break;
2821 	case 0x5:
2822 		num = 2;
2823 		ring[0] = REO_REMAP_SW1;
2824 		ring[1] = REO_REMAP_SW3;
2825 		break;
2826 	case 0x4:
2827 		num = 1;
2828 		ring[0] = REO_REMAP_SW3;
2829 		break;
2830 	case 0x3:
2831 		num = 2;
2832 		ring[0] = REO_REMAP_SW1;
2833 		ring[1] = REO_REMAP_SW2;
2834 		break;
2835 	case 0x2:
2836 		num = 1;
2837 		ring[0] = REO_REMAP_SW2;
2838 		break;
2839 	case 0x1:
2840 		num = 1;
2841 		ring[0] = REO_REMAP_SW1;
2842 		break;
2843 	default:
2844 		dp_err("unknown reo ring map 0x%x", value);
2845 		QDF_BUG(0);
2846 	}
2847 	return num;
2848 }
2849 
2850 bool dp_reo_remap_config(struct dp_soc *soc,
2851 			 uint32_t *remap0,
2852 			 uint32_t *remap1,
2853 			 uint32_t *remap2)
2854 {
2855 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2856 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
2857 	uint8_t num;
2858 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
2859 	uint32_t value;
2860 
2861 	switch (offload_radio) {
2862 	case dp_nss_cfg_default:
2863 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
2864 		num = dp_reo_ring_selection(value, ring);
2865 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2866 					      num, remap1, remap2);
2867 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
2868 
2869 		break;
2870 	case dp_nss_cfg_first_radio:
2871 		value = reo_config & 0xE;
2872 		num = dp_reo_ring_selection(value, ring);
2873 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2874 					      num, remap1, remap2);
2875 
2876 		break;
2877 	case dp_nss_cfg_second_radio:
2878 		value = reo_config & 0xD;
2879 		num = dp_reo_ring_selection(value, ring);
2880 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2881 					      num, remap1, remap2);
2882 
2883 		break;
2884 	case dp_nss_cfg_dbdc:
2885 	case dp_nss_cfg_dbtc:
2886 		/* return false if both or all are offloaded to NSS */
2887 		return false;
2888 	}
2889 
2890 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2891 		 *remap1, *remap2, offload_radio);
2892 	return true;
2893 }
2894 
/* IPA offload disabled: Tx ring size is left untouched */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

/* IPA offload disabled: Tx completion ring size is left untouched */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
2905 #endif /* IPA_OFFLOAD */
2906 
2907 /**
2908  * dp_reo_frag_dst_set() - configure reo register to set the
2909  *                        fragment destination ring
2910  * @soc: Datapath soc
2911  * @frag_dst_ring: output parameter to set fragment destination ring
2912  *
2913  * Based on offload_radio below fragment destination rings is selected
2914  * 0 - TCL
2915  * 1 - SW1
2916  * 2 - SW2
2917  * 3 - SW3
2918  * 4 - SW4
2919  * 5 - Release
2920  * 6 - FW
2921  * 7 - alternate select
2922  *
2923  * Return: void
2924  */
2925 void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2926 {
2927 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2928 
2929 	switch (offload_radio) {
2930 	case dp_nss_cfg_default:
2931 		*frag_dst_ring = REO_REMAP_TCL;
2932 		break;
2933 	case dp_nss_cfg_first_radio:
2934 		/*
2935 		 * This configuration is valid for single band radio which
2936 		 * is also NSS offload.
2937 		 */
2938 	case dp_nss_cfg_dbdc:
2939 	case dp_nss_cfg_dbtc:
2940 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2941 		break;
2942 	default:
2943 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
2944 		break;
2945 	}
2946 }
2947 
#ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to wait for RX HW stats query completion */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
/* Stats-ext feature disabled: nothing to create */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
2958 
2959 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
2960 {
2961 	int tcl_ring_num, wbm_ring_num;
2962 
2963 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
2964 						index,
2965 						&tcl_ring_num,
2966 						&wbm_ring_num);
2967 
2968 	if (tcl_ring_num == -1) {
2969 		dp_err("incorrect tcl ring num for index %u", index);
2970 		return;
2971 	}
2972 
2973 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
2974 			     soc->tcl_data_ring[index].alloc_size,
2975 			     soc->ctrl_psoc,
2976 			     WLAN_MD_DP_SRNG_TCL_DATA,
2977 			     "tcl_data_ring");
2978 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
2979 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
2980 		       tcl_ring_num);
2981 
2982 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
2983 		return;
2984 
2985 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
2986 			     soc->tx_comp_ring[index].alloc_size,
2987 			     soc->ctrl_psoc,
2988 			     WLAN_MD_DP_SRNG_TX_COMP,
2989 			     "tcl_comp_ring");
2990 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2991 		       wbm_ring_num);
2992 }
2993 
2994 /**
2995  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
2996  * ring pair
2997  * @soc: DP soc pointer
2998  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2999  *
3000  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3001  */
3002 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3003 						uint8_t index)
3004 {
3005 	int tcl_ring_num, wbm_ring_num;
3006 	uint8_t bm_id;
3007 
3008 	if (index >= MAX_TCL_DATA_RINGS) {
3009 		dp_err("unexpected index!");
3010 		QDF_BUG(0);
3011 		goto fail1;
3012 	}
3013 
3014 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
3015 						index,
3016 						&tcl_ring_num,
3017 						&wbm_ring_num);
3018 
3019 	if (tcl_ring_num == -1) {
3020 		dp_err("incorrect tcl ring num for index %u", index);
3021 		goto fail1;
3022 	}
3023 
3024 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
3025 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
3026 			 tcl_ring_num, 0)) {
3027 		dp_err("dp_srng_init failed for tcl_data_ring");
3028 		goto fail1;
3029 	}
3030 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3031 			  soc->tcl_data_ring[index].alloc_size,
3032 			  soc->ctrl_psoc,
3033 			  WLAN_MD_DP_SRNG_TCL_DATA,
3034 			  "tcl_data_ring");
3035 
3036 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
3037 		goto set_rbm;
3038 
3039 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3040 			 wbm_ring_num, 0)) {
3041 		dp_err("dp_srng_init failed for tx_comp_ring");
3042 		goto fail1;
3043 	}
3044 
3045 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3046 			  soc->tx_comp_ring[index].alloc_size,
3047 			  soc->ctrl_psoc,
3048 			  WLAN_MD_DP_SRNG_TX_COMP,
3049 			  "tcl_comp_ring");
3050 set_rbm:
3051 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
3052 
3053 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
3054 
3055 	return QDF_STATUS_SUCCESS;
3056 
3057 fail1:
3058 	return QDF_STATUS_E_FAILURE;
3059 }
3060 
3061 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
3062 {
3063 	dp_debug("index %u", index);
3064 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
3065 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
3066 }
3067 
3068 /**
3069  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
3070  * ring pair for the given "index"
3071  * @soc: DP soc pointer
3072  * @index: index of soc->tcl_data or soc->tx_comp to initialize
3073  *
3074  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3075  */
3076 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3077 						 uint8_t index)
3078 {
3079 	int tx_ring_size;
3080 	int tx_comp_ring_size;
3081 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3082 	int cached = 0;
3083 
3084 	if (index >= MAX_TCL_DATA_RINGS) {
3085 		dp_err("unexpected index!");
3086 		QDF_BUG(0);
3087 		goto fail1;
3088 	}
3089 
3090 	dp_debug("index %u", index);
3091 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3092 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
3093 
3094 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3095 			  tx_ring_size, cached)) {
3096 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3097 		goto fail1;
3098 	}
3099 
3100 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3101 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
3102 	/* Enable cached TCL desc if NSS offload is disabled */
3103 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3104 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3105 
3106 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
3107 	    INVALID_WBM_RING_NUM)
3108 		return QDF_STATUS_SUCCESS;
3109 
3110 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3111 			  tx_comp_ring_size, cached)) {
3112 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3113 		goto fail1;
3114 	}
3115 
3116 	return QDF_STATUS_SUCCESS;
3117 
3118 fail1:
3119 	return QDF_STATUS_E_FAILURE;
3120 }
3121 
3122 /**
3123  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
3124  * @pdev: DP_PDEV handle
3125  *
3126  * Return: void
3127  */
3128 void
3129 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3130 {
3131 	uint8_t map_id;
3132 	struct dp_soc *soc = pdev->soc;
3133 
3134 	if (!soc)
3135 		return;
3136 
3137 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3138 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3139 			     default_dscp_tid_map,
3140 			     sizeof(default_dscp_tid_map));
3141 	}
3142 
3143 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3144 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3145 					default_dscp_tid_map,
3146 					map_id);
3147 	}
3148 }
3149 
3150 /**
3151  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
3152  * @pdev: DP_PDEV handle
3153  *
3154  * Return: void
3155  */
3156 void
3157 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3158 {
3159 	struct dp_soc *soc = pdev->soc;
3160 
3161 	if (!soc)
3162 		return;
3163 
3164 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3165 		     sizeof(default_pcp_tid_map));
3166 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3167 }
3168 
/*
 * dp_reo_desc_freelist_destroy() - drain the REO descriptor freelist,
 * unmapping and freeing every queued HW queue descriptor, then destroy
 * the list and its lock. Has external linkage when
 * DP_UMAC_HW_RESET_SUPPORT is defined; static inline otherwise.
 */
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* DMA-unmap the HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3192 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Flag marks the list usable; cleared again in destroy */
	soc->reo_desc_deferred_freelist_init = true;
}

/**
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* Clear the init flag under the lock before draining the list */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		/* DMA-unmap the HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Deferred REO qdesc destroy not compiled in: no-op stubs */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3245 
3246 /**
3247  * dp_soc_reset_txrx_ring_map() - reset tx ring map
3248  * @soc: DP SOC handle
3249  *
3250  */
3251 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
3252 {
3253 	uint32_t i;
3254 
3255 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
3256 		soc->tx_ring_map[i] = 0;
3257 }
3258 
3259 /**
3260  * dp_soc_deinit() - Deinitialize txrx SOC
3261  * @txrx_soc: Opaque DP SOC handle
3262  *
3263  * Return: None
3264  */
3265 void dp_soc_deinit(void *txrx_soc)
3266 {
3267 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3268 	struct htt_soc *htt_soc = soc->htt_handle;
3269 
3270 	dp_monitor_soc_deinit(soc);
3271 
3272 	/* free peer tables & AST tables allocated during peer_map_attach */
3273 	if (soc->peer_map_attach_success) {
3274 		dp_peer_find_detach(soc);
3275 		soc->arch_ops.txrx_peer_map_detach(soc);
3276 		soc->peer_map_attach_success = FALSE;
3277 	}
3278 
3279 	qdf_flush_work(&soc->htt_stats.work);
3280 	qdf_disable_work(&soc->htt_stats.work);
3281 
3282 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3283 
3284 	dp_soc_reset_txrx_ring_map(soc);
3285 
3286 	dp_reo_desc_freelist_destroy(soc);
3287 	dp_reo_desc_deferred_freelist_destroy(soc);
3288 
3289 	DEINIT_RX_HW_STATS_LOCK(soc);
3290 
3291 	qdf_spinlock_destroy(&soc->ast_lock);
3292 
3293 	dp_peer_mec_spinlock_destroy(soc);
3294 
3295 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3296 
3297 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
3298 
3299 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3300 
3301 	qdf_spinlock_destroy(&soc->vdev_map_lock);
3302 
3303 	dp_reo_cmdlist_destroy(soc);
3304 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3305 
3306 	dp_soc_tx_desc_sw_pools_deinit(soc);
3307 
3308 	dp_soc_srng_deinit(soc);
3309 
3310 	dp_hw_link_desc_ring_deinit(soc);
3311 
3312 	dp_soc_print_inactive_objects(soc);
3313 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
3314 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
3315 
3316 	htt_soc_htc_dealloc(soc->htt_handle);
3317 
3318 	htt_soc_detach(htt_soc);
3319 
3320 	/* Free wbm sg list and reset flags in down path */
3321 	dp_rx_wbm_sg_list_deinit(soc);
3322 
3323 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
3324 			     WLAN_MD_DP_SOC, "dp_soc");
3325 }
3326 
#ifdef QCA_HOST2FW_RXBUF_RING
/**
 * dp_htt_setup_rxdma_err_dst_ring() - register the RXDMA error
 *				       destination ring with HTT
 * @soc: DP soc handle
 * @mac_id: mac id passed to htt_srng_setup
 * @lmac_id: index into soc->rxdma_err_dst_ring
 */
void
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
				int lmac_id)
{
	struct dp_srng *err_dst = &soc->rxdma_err_dst_ring[lmac_id];

	/* Skip rings that were never initialized */
	if (!err_dst->hal_srng)
		return;

	htt_srng_setup(soc->htt_handle, mac_id, err_dst->hal_srng,
		       RXDMA_DST);
}
#endif
3338 
3339 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
3340 				  enum cdp_host_reo_dest_ring *reo_dest,
3341 				  bool *hash_based)
3342 {
3343 	struct dp_soc *soc;
3344 	struct dp_pdev *pdev;
3345 
3346 	pdev = vdev->pdev;
3347 	soc = pdev->soc;
3348 	/*
3349 	 * hash based steering is disabled for Radios which are offloaded
3350 	 * to NSS
3351 	 */
3352 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3353 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3354 
3355 	/*
3356 	 * Below line of code will ensure the proper reo_dest ring is chosen
3357 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3358 	 */
3359 	*reo_dest = pdev->reo_dest;
3360 }
3361 
#ifdef IPA_OFFLOAD
/**
 * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
 * @vdev: Virtual device
 *
 * Return: true if the vdev is of subtype P2P
 *	   false if the vdev is of any other subtype
 */
static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
{
	if (vdev->subtype == wlan_op_subtype_p2p_device ||
	    vdev->subtype == wlan_op_subtype_p2p_cli ||
	    vdev->subtype == wlan_op_subtype_p2p_go)
		return true;

	return false;
}

/**
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO (unused in this IPA variant)
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: lmac peer id MSB output; not written by this IPA
 *		      variant (caller's initial value is preserved)
 *
 * If IPA is enabled in ini, for SAP mode, disable hash based
 * steering, use default reo_dst ring for RX. Use config values for other modes.
 *
 * Return: None
 */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       struct cdp_peer_setup_info *setup_info,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based,
				       uint8_t *lmac_peer_id_msb)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;

	pdev = vdev->pdev;
	soc = pdev->soc;

	/* Start from the vdev defaults, then apply IPA overrides below */
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);

	/* For P2P-GO interfaces we do not need to change the REO
	 * configuration even if IPA config is enabled
	 */
	if (dp_is_vdev_subtype_p2p(vdev))
		return;

	/*
	 * If IPA is enabled, disable hash-based flow steering and set
	 * reo_dest_ring_4 as the REO ring to receive packets on.
	 * IPA is configured to reap reo_dest_ring_4.
	 *
	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
	 * value enum value is from 1 - 4.
	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
	 */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		if (dp_ipa_is_mdm_platform()) {
			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
			if (vdev->opmode == wlan_op_mode_ap)
				*hash_based = 0;
		} else {
			dp_debug("opt_dp: default HOST reo ring is set");
		}
	}
}

#else

/**
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO, forwarded to the arch-specific
 *		handler
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: lmac peer id MSB output, filled by the arch handler
 *
 * Use system config values for hash based steering.
 * Return: None
 */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       struct cdp_peer_setup_info *setup_info,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based,
				       uint8_t *lmac_peer_id_msb)
{
	struct dp_soc *soc = vdev->pdev->soc;

	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
					lmac_peer_id_msb);
}
#endif /* IPA_OFFLOAD */
3458 
3459 /**
3460  * dp_peer_setup_wifi3() - initialize the peer
3461  * @soc_hdl: soc handle object
3462  * @vdev_id: vdev_id of vdev object
3463  * @peer_mac: Peer's mac address
3464  * @setup_info: peer setup info for MLO
3465  *
3466  * Return: QDF_STATUS
3467  */
3468 QDF_STATUS
3469 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3470 		    uint8_t *peer_mac,
3471 		    struct cdp_peer_setup_info *setup_info)
3472 {
3473 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3474 	struct dp_pdev *pdev;
3475 	bool hash_based = 0;
3476 	enum cdp_host_reo_dest_ring reo_dest;
3477 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3478 	struct dp_vdev *vdev = NULL;
3479 	struct dp_peer *peer =
3480 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3481 					       DP_MOD_ID_CDP);
3482 	struct dp_peer *mld_peer = NULL;
3483 	enum wlan_op_mode vdev_opmode;
3484 	uint8_t lmac_peer_id_msb = 0;
3485 
3486 	if (!peer)
3487 		return QDF_STATUS_E_FAILURE;
3488 
3489 	vdev = peer->vdev;
3490 	if (!vdev) {
3491 		status = QDF_STATUS_E_FAILURE;
3492 		goto fail;
3493 	}
3494 
3495 	/* save vdev related member in case vdev freed */
3496 	vdev_opmode = vdev->opmode;
3497 	pdev = vdev->pdev;
3498 	dp_peer_setup_get_reo_hash(vdev, setup_info,
3499 				   &reo_dest, &hash_based,
3500 				   &lmac_peer_id_msb);
3501 
3502 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
3503 					   peer, vdev, vdev->vdev_id,
3504 					   setup_info);
3505 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
3506 		"hash-based-steering:%d default-reo_dest:%u",
3507 		pdev->pdev_id, vdev->vdev_id,
3508 		vdev->opmode, peer,
3509 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
3510 
3511 	/*
3512 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
3513 	 * i.e both the devices have same MAC address. In these
3514 	 * cases we want such pkts to be processed in NULL Q handler
3515 	 * which is REO2TCL ring. for this reason we should
3516 	 * not setup reo_queues and default route for bss_peer.
3517 	 */
3518 	if (!IS_MLO_DP_MLD_PEER(peer))
3519 		dp_monitor_peer_tx_init(pdev, peer);
3520 
3521 	if (!setup_info)
3522 		if (dp_peer_legacy_setup(soc, peer) !=
3523 				QDF_STATUS_SUCCESS) {
3524 			status = QDF_STATUS_E_RESOURCES;
3525 			goto fail;
3526 		}
3527 
3528 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
3529 		status = QDF_STATUS_E_FAILURE;
3530 		goto fail;
3531 	}
3532 
3533 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3534 		/* TODO: Check the destination ring number to be passed to FW */
3535 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3536 				soc->ctrl_psoc,
3537 				peer->vdev->pdev->pdev_id,
3538 				peer->mac_addr.raw,
3539 				peer->vdev->vdev_id, hash_based, reo_dest,
3540 				lmac_peer_id_msb);
3541 	}
3542 
3543 	qdf_atomic_set(&peer->is_default_route_set, 1);
3544 
3545 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
3546 	if (QDF_IS_STATUS_ERROR(status)) {
3547 		dp_peer_err("peer mlo setup failed");
3548 		qdf_assert_always(0);
3549 	}
3550 
3551 	if (vdev_opmode != wlan_op_mode_monitor) {
3552 		/* In case of MLD peer, switch peer to mld peer and
3553 		 * do peer_rx_init.
3554 		 */
3555 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3556 		    IS_MLO_DP_LINK_PEER(peer)) {
3557 			if (setup_info && setup_info->is_first_link) {
3558 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
3559 				if (mld_peer)
3560 					dp_peer_rx_init(pdev, mld_peer);
3561 				else
3562 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
3563 			}
3564 		} else {
3565 			dp_peer_rx_init(pdev, peer);
3566 		}
3567 	}
3568 
3569 	if (!IS_MLO_DP_MLD_PEER(peer))
3570 		dp_peer_ppdu_delayed_ba_init(peer);
3571 
3572 fail:
3573 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3574 	return status;
3575 }
3576 
3577 /**
3578  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
3579  * @txrx_soc: cdp soc handle
3580  * @ac: Access category
3581  * @value: timeout value in millisec
3582  *
3583  * Return: void
3584  */
3585 void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3586 			     uint8_t ac, uint32_t value)
3587 {
3588 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3589 
3590 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
3591 }
3592 
3593 /**
3594  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
3595  * @txrx_soc: cdp soc handle
3596  * @ac: access category
3597  * @value: timeout value in millisec
3598  *
3599  * Return: void
3600  */
3601 void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3602 			     uint8_t ac, uint32_t *value)
3603 {
3604 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3605 
3606 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
3607 }
3608 
3609 /**
3610  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3611  * @txrx_soc: cdp soc handle
3612  * @pdev_id: id of physical device object
3613  * @val: reo destination ring index (1 - 4)
3614  *
3615  * Return: QDF_STATUS
3616  */
3617 QDF_STATUS
3618 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
3619 		     enum cdp_host_reo_dest_ring val)
3620 {
3621 	struct dp_pdev *pdev =
3622 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3623 						   pdev_id);
3624 
3625 	if (pdev) {
3626 		pdev->reo_dest = val;
3627 		return QDF_STATUS_SUCCESS;
3628 	}
3629 
3630 	return QDF_STATUS_E_FAILURE;
3631 }
3632 
3633 /**
3634  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3635  * @txrx_soc: cdp soc handle
3636  * @pdev_id: id of physical device object
3637  *
3638  * Return: reo destination ring index
3639  */
3640 enum cdp_host_reo_dest_ring
3641 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
3642 {
3643 	struct dp_pdev *pdev =
3644 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3645 						   pdev_id);
3646 
3647 	if (pdev)
3648 		return pdev->reo_dest;
3649 	else
3650 		return cdp_host_reo_dest_ring_unknown;
3651 }
3652 
3653 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
3654 	union hal_reo_status *reo_status)
3655 {
3656 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
3657 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
3658 
3659 	if (!dp_check_pdev_exists(soc, pdev)) {
3660 		dp_err_rl("pdev doesn't exist");
3661 		return;
3662 	}
3663 
3664 	if (!qdf_atomic_read(&soc->cmn_init_done))
3665 		return;
3666 
3667 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
3668 		DP_PRINT_STATS("REO stats failure %d",
3669 			       queue_status->header.status);
3670 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
3671 		return;
3672 	}
3673 
3674 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
3675 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
3676 }
3677 
3678 /**
3679  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
3680  * @soc: dp soc.
3681  * @pdev: dp pdev.
3682  *
3683  * Return: None.
3684  */
3685 void
3686 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
3687 {
3688 	uint32_t hw_head;
3689 	uint32_t hw_tail;
3690 	struct dp_srng *srng;
3691 
3692 	if (!soc) {
3693 		dp_err("soc is NULL");
3694 		return;
3695 	}
3696 
3697 	if (!pdev) {
3698 		dp_err("pdev is NULL");
3699 		return;
3700 	}
3701 
3702 	srng = &pdev->soc->wbm_idle_link_ring;
3703 	if (!srng) {
3704 		dp_err("wbm_idle_link_ring srng is NULL");
3705 		return;
3706 	}
3707 
3708 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
3709 			&hw_tail, WBM_IDLE_LINK);
3710 
3711 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
3712 		 hw_head, hw_tail);
3713 }
3714 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * dp_update_soft_irq_limits() - override the per-poll reap limits
 * @soc: DP soc handle
 * @tx_limit: TX completion reap limit per softirq loop
 * @rx_limit: RX reap limit per softirq loop
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}

#else

/* Soft IRQ time limit feature disabled: limits are not adjustable */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
3731 
3732 /**
3733  * dp_display_srng_info() - Dump the srng HP TP info
3734  * @soc_hdl: CDP Soc handle
3735  *
3736  * This function dumps the SW hp/tp values for the important rings.
3737  * HW hp/tp values are not being dumped, since it can lead to
3738  * READ NOC error when UMAC is in low power state. MCC does not have
3739  * device force wake working yet.
3740  *
3741  * Return: none
3742  */
3743 void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
3744 {
3745 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3746 	hal_soc_handle_t hal_soc = soc->hal_soc;
3747 	uint32_t hp, tp, i;
3748 
3749 	dp_info("SRNG HP-TP data:");
3750 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3751 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
3752 				&tp, &hp);
3753 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3754 
3755 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
3756 		    INVALID_WBM_RING_NUM)
3757 			continue;
3758 
3759 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
3760 				&tp, &hp);
3761 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3762 	}
3763 
3764 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3765 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
3766 				&tp, &hp);
3767 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3768 	}
3769 
3770 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
3771 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
3772 
3773 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
3774 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
3775 
3776 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
3777 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
3778 }
3779 
3780 /**
3781  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
3782  * @psoc: dp soc handle
3783  * @pdev_id: id of DP_PDEV handle
3784  * @pcp: pcp value
3785  * @tid: tid value passed by the user
3786  *
3787  * Return: QDF_STATUS_SUCCESS on success
3788  */
3789 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
3790 					 uint8_t pdev_id,
3791 					 uint8_t pcp, uint8_t tid)
3792 {
3793 	struct dp_soc *soc = (struct dp_soc *)psoc;
3794 
3795 	soc->pcp_tid_map[pcp] = tid;
3796 
3797 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
3798 	return QDF_STATUS_SUCCESS;
3799 }
3800 
3801 /**
3802  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
3803  * @soc_hdl: DP soc handle
3804  * @vdev_id: id of DP_VDEV handle
3805  * @pcp: pcp value
3806  * @tid: tid value passed by the user
3807  *
3808  * Return: QDF_STATUS_SUCCESS on success
3809  */
3810 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
3811 					 uint8_t vdev_id,
3812 					 uint8_t pcp, uint8_t tid)
3813 {
3814 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3815 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3816 						     DP_MOD_ID_CDP);
3817 
3818 	if (!vdev)
3819 		return QDF_STATUS_E_FAILURE;
3820 
3821 	vdev->pcp_tid_map[pcp] = tid;
3822 
3823 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3824 	return QDF_STATUS_SUCCESS;
3825 }
3826 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain all UMAC/LMAC SRNGs ahead of a power transition
 * @soc_handle: CDP soc handle
 */
void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t saved_tx_limit;
	uint32_t saved_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int ctx;
	int cpu = dp_srng_get_cpu();

	saved_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	saved_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (ctx = 0; ctx < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	     ctx++)
		dp_service_srngs(&soc->intr_ctx[ctx], budget, cpu);

	dp_update_soft_irq_limits(soc, saved_tx_limit, saved_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u", val);
}
#endif
3860 
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: srng
 *
 * Only acts when the ring has HAL_SRNG_FLUSH_EVENT pending; the event
 * is consumed (cleared) by the check.
 *
 * Return: None
 */
static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (!hal_srng)
		return;

	if (!hal_srng_get_clear_event(hal_srng, HAL_SRNG_FLUSH_EVENT))
		return;

	/* Acquire and release the ring lock */
	hal_srng_access_start(soc->hal_soc, hal_srng);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	hal_srng_set_flush_last_ts(hal_srng);

	dp_debug("flushed");
}

/**
 * dp_update_ring_hptp() - flush pending HP/TP updates on TX rings
 * @soc: DP soc context
 * @force_flush_tx: when true, force a flush of every TCL data ring
 *		    (and skip the REO cmd ring)
 */
void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
{
	uint8_t ring;

	for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
		if (force_flush_tx)
			hal_srng_set_event(soc->tcl_data_ring[ring].hal_srng,
					   HAL_SRNG_FLUSH_EVENT);

		dp_flush_ring_hptp(soc, soc->tcl_data_ring[ring].hal_srng);
	}

	if (!force_flush_tx)
		dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
}
#endif
3906 
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_flush_tcl_ring() - flush TCL ring hp
 * @pdev: dp pdev
 * @ring_id: TCL ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_ring_handle_t hal_ring_hdl =
			soc->tcl_data_ring[ring_id].hal_srng;
	int ret;

	ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
	if (ret)
		return ret;

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		/*
		 * Runtime PM get failed: end ring access without a HW
		 * flush and mark the ring for a deferred flush
		 * (see dp_flush_ring_hptp).
		 */
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		return ret;
	}

	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}
#else
/* SW latency manager disabled: nothing to flush */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
3945 
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 100

/**
 * dp_rx_hw_stats_cb() - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context (struct dp_req_rx_hw_stats_t)
 * @reo_status: reo command response status
 *
 * Invoked once per outstanding TID stats query; the response that drops
 * pending_tid_stats_cnt to zero frees @cb_ctxt.
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	bool is_query_timeout;

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	is_query_timeout = rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt if all pending tid stats query is received */
	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
		if (!is_query_timeout) {
			/* Wake the waiter only if it has not timed out */
			qdf_event_set(&soc->rx_hw_stats_event);
			soc->is_last_stats_ctx_init = false;
		}

		qdf_mem_free(rx_hw_stats);
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d",
			queue_status->header.status);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		return;
	}

	if (!is_query_timeout) {
		/* Accumulate the per-queue counters into the soc totals */
		soc->ext_stats.rx_mpdu_received +=
					queue_status->mpdu_frms_cnt;
		soc->ext_stats.rx_mpdu_missed +=
					queue_status->hole_cnt;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
3992 
3993 /**
3994  * dp_request_rx_hw_stats() - request rx hardware stats
3995  * @soc_hdl: soc handle
3996  * @vdev_id: vdev id
3997  *
3998  * Return: None
3999  */
4000 QDF_STATUS
4001 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
4002 {
4003 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4004 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4005 						     DP_MOD_ID_CDP);
4006 	struct dp_peer *peer = NULL;
4007 	QDF_STATUS status;
4008 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
4009 	int rx_stats_sent_cnt = 0;
4010 	uint32_t last_rx_mpdu_received;
4011 	uint32_t last_rx_mpdu_missed;
4012 
4013 	if (!vdev) {
4014 		dp_err("vdev is null for vdev_id: %u", vdev_id);
4015 		status = QDF_STATUS_E_INVAL;
4016 		goto out;
4017 	}
4018 
4019 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
4020 
4021 	if (!peer) {
4022 		dp_err("Peer is NULL");
4023 		status = QDF_STATUS_E_INVAL;
4024 		goto out;
4025 	}
4026 
4027 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
4028 
4029 	if (!rx_hw_stats) {
4030 		dp_err("malloc failed for hw stats structure");
4031 		status = QDF_STATUS_E_INVAL;
4032 		goto out;
4033 	}
4034 
4035 	qdf_event_reset(&soc->rx_hw_stats_event);
4036 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4037 	/* save the last soc cumulative stats and reset it to 0 */
4038 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
4039 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
4040 	soc->ext_stats.rx_mpdu_received = 0;
4041 	soc->ext_stats.rx_mpdu_missed = 0;
4042 
4043 	dp_debug("HW stats query start");
4044 	rx_stats_sent_cnt =
4045 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
4046 	if (!rx_stats_sent_cnt) {
4047 		dp_err("no tid stats sent successfully");
4048 		qdf_mem_free(rx_hw_stats);
4049 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4050 		status = QDF_STATUS_E_INVAL;
4051 		goto out;
4052 	}
4053 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
4054 		       rx_stats_sent_cnt);
4055 	rx_hw_stats->is_query_timeout = false;
4056 	soc->is_last_stats_ctx_init = true;
4057 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4058 
4059 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
4060 				       DP_REO_STATUS_STATS_TIMEOUT);
4061 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
4062 
4063 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4064 	if (status != QDF_STATUS_SUCCESS) {
4065 		dp_info("partial rx hw stats event collected with %d",
4066 			qdf_atomic_read(
4067 				&rx_hw_stats->pending_tid_stats_cnt));
4068 		if (soc->is_last_stats_ctx_init)
4069 			rx_hw_stats->is_query_timeout = true;
4070 		/*
4071 		 * If query timeout happened, use the last saved stats
4072 		 * for this time query.
4073 		 */
4074 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
4075 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
4076 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
4077 
4078 	}
4079 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4080 
4081 out:
4082 	if (peer)
4083 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4084 	if (vdev)
4085 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4086 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
4087 
4088 	return status;
4089 }
4090 
4091 /**
4092  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
4093  * @soc_hdl: soc handle
4094  *
4095  * Return: None
4096  */
4097 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
4098 {
4099 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4100 
4101 	soc->ext_stats.rx_mpdu_received = 0;
4102 	soc->ext_stats.rx_mpdu_missed = 0;
4103 }
4104 #endif /* WLAN_FEATURE_STATS_EXT */
4105 
4106 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
4107 {
4108 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4109 
4110 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
4111 }
4112 
4113 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
4114 {
4115 	uint32_t i;
4116 
4117 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4118 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
4119 	}
4120 }
4121 
4122 qdf_export_symbol(dp_soc_set_txrx_ring_map);
4123 
/**
 * dp_soc_cfg_dump() - log target-specific soc configuration for debug
 * @soc: dp soc handle
 * @target_type: target type returned by hal_get_target_type()
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d da_war_enabled = %d",
		     soc->ast_override_support, soc->da_war_enabled);

	/* Dump the full wlan_cfg soc context as well */
	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
4132 
4133 /**
4134  * dp_soc_cfg_init() - initialize target specific configuration
4135  *		       during dp_soc_init
4136  * @soc: dp soc handle
4137  */
4138 static void dp_soc_cfg_init(struct dp_soc *soc)
4139 {
4140 	uint32_t target_type;
4141 
4142 	target_type = hal_get_target_type(soc->hal_soc);
4143 	switch (target_type) {
4144 	case TARGET_TYPE_QCA6290:
4145 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4146 					       REO_DST_RING_SIZE_QCA6290);
4147 		soc->ast_override_support = 1;
4148 		soc->da_war_enabled = false;
4149 		break;
4150 	case TARGET_TYPE_QCA6390:
4151 	case TARGET_TYPE_QCA6490:
4152 	case TARGET_TYPE_QCA6750:
4153 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4154 					       REO_DST_RING_SIZE_QCA6290);
4155 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4156 		soc->ast_override_support = 1;
4157 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4158 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4159 		    QDF_GLOBAL_MONITOR_MODE) {
4160 			int int_ctx;
4161 
4162 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
4163 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4164 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4165 			}
4166 		}
4167 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4168 		break;
4169 	case TARGET_TYPE_KIWI:
4170 	case TARGET_TYPE_MANGO:
4171 	case TARGET_TYPE_PEACH:
4172 		soc->ast_override_support = 1;
4173 		soc->per_tid_basize_max_tid = 8;
4174 
4175 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4176 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4177 		    QDF_GLOBAL_MONITOR_MODE) {
4178 			int int_ctx;
4179 
4180 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
4181 			     int_ctx++) {
4182 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4183 				if (dp_is_monitor_mode_using_poll(soc))
4184 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4185 			}
4186 		}
4187 
4188 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4189 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
4190 		break;
4191 	case TARGET_TYPE_QCA8074:
4192 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4193 		soc->da_war_enabled = true;
4194 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4195 		break;
4196 	case TARGET_TYPE_QCA8074V2:
4197 	case TARGET_TYPE_QCA6018:
4198 	case TARGET_TYPE_QCA9574:
4199 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4200 		soc->ast_override_support = 1;
4201 		soc->per_tid_basize_max_tid = 8;
4202 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4203 		soc->da_war_enabled = false;
4204 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4205 		break;
4206 	case TARGET_TYPE_QCN9000:
4207 		soc->ast_override_support = 1;
4208 		soc->da_war_enabled = false;
4209 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4210 		soc->per_tid_basize_max_tid = 8;
4211 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4212 		soc->lmac_polled_mode = 0;
4213 		soc->wbm_release_desc_rx_sg_support = 1;
4214 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4215 		break;
4216 	case TARGET_TYPE_QCA5018:
4217 	case TARGET_TYPE_QCN6122:
4218 	case TARGET_TYPE_QCN9160:
4219 		soc->ast_override_support = 1;
4220 		soc->da_war_enabled = false;
4221 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4222 		soc->per_tid_basize_max_tid = 8;
4223 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
4224 		soc->disable_mac1_intr = 1;
4225 		soc->disable_mac2_intr = 1;
4226 		soc->wbm_release_desc_rx_sg_support = 1;
4227 		break;
4228 	case TARGET_TYPE_QCN9224:
4229 		soc->umac_reset_supported = true;
4230 		soc->ast_override_support = 1;
4231 		soc->da_war_enabled = false;
4232 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4233 		soc->per_tid_basize_max_tid = 8;
4234 		soc->wbm_release_desc_rx_sg_support = 1;
4235 		soc->rxdma2sw_rings_not_supported = 1;
4236 		soc->wbm_sg_last_msdu_war = 1;
4237 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4238 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4239 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4240 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4241 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4242 						  CFG_DP_HOST_AST_DB_ENABLE);
4243 		soc->features.wds_ext_ast_override_enable = true;
4244 		break;
4245 	case TARGET_TYPE_QCA5332:
4246 	case TARGET_TYPE_QCN6432:
4247 		soc->umac_reset_supported = true;
4248 		soc->ast_override_support = 1;
4249 		soc->da_war_enabled = false;
4250 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4251 		soc->per_tid_basize_max_tid = 8;
4252 		soc->wbm_release_desc_rx_sg_support = 1;
4253 		soc->rxdma2sw_rings_not_supported = 1;
4254 		soc->wbm_sg_last_msdu_war = 1;
4255 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4256 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4257 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
4258 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4259 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4260 						  CFG_DP_HOST_AST_DB_ENABLE);
4261 		soc->features.wds_ext_ast_override_enable = true;
4262 		break;
4263 	default:
4264 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4265 		qdf_assert_always(0);
4266 		break;
4267 	}
4268 	dp_soc_cfg_dump(soc, target_type);
4269 }
4270 
4271 /**
4272  * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
4273  * @soc: Opaque DP SOC handle
4274  *
4275  * Return: none
4276  */
4277 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
4278 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
4279 {
4280 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
4281 		soc->mld_mode_ap =
4282 		soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4283 					CDP_CFG_MLD_NETDEV_MODE_AP);
4284 	}
4285 	qdf_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
4286 }
4287 #else
4288 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
4289 {
4290 	(void)soc;
4291 }
4292 #endif
4293 
4294 /**
4295  * dp_soc_init() - Initialize txrx SOC
4296  * @soc: Opaque DP SOC handle
4297  * @htc_handle: Opaque HTC handle
4298  * @hif_handle: Opaque HIF handle
4299  *
4300  * Return: DP SOC handle on success, NULL on failure
4301  */
4302 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
4303 		  struct hif_opaque_softc *hif_handle)
4304 {
4305 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
4306 	bool is_monitor_mode = false;
4307 	uint8_t i;
4308 	int num_dp_msi;
4309 	bool ppeds_attached = false;
4310 
4311 	htt_soc = htt_soc_attach(soc, htc_handle);
4312 	if (!htt_soc)
4313 		goto fail1;
4314 
4315 	soc->htt_handle = htt_soc;
4316 
4317 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
4318 		goto fail2;
4319 
4320 	htt_set_htc_handle(htt_soc, htc_handle);
4321 
4322 	dp_soc_cfg_init(soc);
4323 
4324 	dp_monitor_soc_cfg_init(soc);
4325 	/* Reset/Initialize wbm sg list and flags */
4326 	dp_rx_wbm_sg_list_reset(soc);
4327 
4328 	/* Note: Any SRNG ring initialization should happen only after
4329 	 * Interrupt mode is set and followed by filling up the
4330 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
4331 	 */
4332 	dp_soc_set_interrupt_mode(soc);
4333 	if (soc->cdp_soc.ol_ops->get_con_mode &&
4334 	    soc->cdp_soc.ol_ops->get_con_mode() ==
4335 	    QDF_GLOBAL_MONITOR_MODE) {
4336 		is_monitor_mode = true;
4337 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
4338 	} else {
4339 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
4340 	}
4341 
4342 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
4343 	if (num_dp_msi < 0) {
4344 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
4345 		goto fail3;
4346 	}
4347 
4348 	if (soc->arch_ops.ppeds_handle_attached)
4349 		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
4350 
4351 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
4352 				     soc->intr_mode, is_monitor_mode,
4353 				     ppeds_attached,
4354 				     soc->umac_reset_supported);
4355 
4356 	/* initialize WBM_IDLE_LINK ring */
4357 	if (dp_hw_link_desc_ring_init(soc)) {
4358 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
4359 		goto fail3;
4360 	}
4361 
4362 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
4363 
4364 	if (dp_soc_srng_init(soc)) {
4365 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
4366 		goto fail4;
4367 	}
4368 
4369 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
4370 			       htt_get_htc_handle(htt_soc),
4371 			       soc->hal_soc, soc->osdev) == NULL)
4372 		goto fail5;
4373 
4374 	/* Initialize descriptors in TCL Rings */
4375 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4376 		hal_tx_init_data_ring(soc->hal_soc,
4377 				      soc->tcl_data_ring[i].hal_srng);
4378 	}
4379 
4380 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
4381 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
4382 		goto fail6;
4383 	}
4384 
4385 	if (soc->arch_ops.txrx_soc_ppeds_start) {
4386 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
4387 			dp_init_err("%pK: ppeds start failed", soc);
4388 			goto fail7;
4389 		}
4390 	}
4391 
4392 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
4393 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
4394 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4395 	wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
4396 			   cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
4397 #endif
4398 	soc->cce_disable = false;
4399 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
4400 
4401 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
4402 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
4403 	qdf_spinlock_create(&soc->vdev_map_lock);
4404 	qdf_atomic_init(&soc->num_tx_outstanding);
4405 	qdf_atomic_init(&soc->num_tx_exception);
4406 	soc->num_tx_allowed =
4407 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
4408 	soc->num_tx_spl_allowed =
4409 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
4410 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
4411 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
4412 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4413 				CDP_CFG_MAX_PEER_ID);
4414 
4415 		if (ret != -EINVAL)
4416 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
4417 
4418 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4419 				CDP_CFG_CCE_DISABLE);
4420 		if (ret == 1)
4421 			soc->cce_disable = true;
4422 	}
4423 
4424 	/*
4425 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
4426 	 * and IPQ5018 WMAC2 is not there in these platforms.
4427 	 */
4428 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
4429 	    soc->disable_mac2_intr)
4430 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
4431 
4432 	/*
4433 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
4434 	 * WMAC1 is not there in this platform.
4435 	 */
4436 	if (soc->disable_mac1_intr)
4437 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
4438 
4439 	/* setup the global rx defrag waitlist */
4440 	TAILQ_INIT(&soc->rx.defrag.waitlist);
4441 	soc->rx.defrag.timeout_ms =
4442 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
4443 	soc->rx.defrag.next_flush_ms = 0;
4444 	soc->rx.flags.defrag_timeout_check =
4445 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
4446 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
4447 
4448 	dp_monitor_soc_init(soc);
4449 
4450 	qdf_atomic_set(&soc->cmn_init_done, 1);
4451 
4452 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
4453 
4454 	qdf_spinlock_create(&soc->ast_lock);
4455 	dp_peer_mec_spinlock_create(soc);
4456 
4457 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
4458 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
4459 	INIT_RX_HW_STATS_LOCK(soc);
4460 
4461 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
4462 	/* fill the tx/rx cpu ring map*/
4463 	dp_soc_set_txrx_ring_map(soc);
4464 
4465 	TAILQ_INIT(&soc->inactive_peer_list);
4466 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
4467 	TAILQ_INIT(&soc->inactive_vdev_list);
4468 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
4469 	qdf_spinlock_create(&soc->htt_stats.lock);
4470 	/* initialize work queue for stats processing */
4471 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4472 
4473 	dp_reo_desc_deferred_freelist_create(soc);
4474 
4475 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
4476 		qdf_dma_mem_stats_read(),
4477 		qdf_heap_mem_stats_read(),
4478 		qdf_skb_total_mem_stats_read());
4479 
4480 	soc->vdev_stats_id_map = 0;
4481 
4482 	dp_soc_get_ap_mld_mode(soc);
4483 
4484 	return soc;
4485 fail7:
4486 	dp_soc_tx_desc_sw_pools_deinit(soc);
4487 fail6:
4488 	htt_soc_htc_dealloc(soc->htt_handle);
4489 fail5:
4490 	dp_soc_srng_deinit(soc);
4491 fail4:
4492 	dp_hw_link_desc_ring_deinit(soc);
4493 fail3:
4494 	htt_htc_pkt_pool_free(htt_soc);
4495 fail2:
4496 	htt_soc_detach(htt_soc);
4497 fail1:
4498 	return NULL;
4499 }
4500 
4501 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/*
 * dp_soc_tcl_cmd_cred_srng_init() - init TCL command/credit ring and
 *				     register it with minidump
 * @soc: datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS (also when the ring is not in use) or the
 *	   dp_srng_init() error
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	if (soc->init_tcl_cmd_cred_ring) {
		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
				       TCL_CMD_CREDIT, 0, 0);
		if (QDF_IS_STATUS_ERROR(status))
			return status;

		/* NOTE(review): minidump name "wbm_desc_rel_ring" looks like
		 * a copy-paste from the WBM ring; it matches the name used by
		 * wlan_minidump_remove() in the deinit path, so a rename must
		 * update both call sites together.
		 */
		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				  soc->tcl_cmd_credit_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "wbm_desc_rel_ring");
	}

	return QDF_STATUS_SUCCESS;
}
4521 
/*
 * dp_soc_tcl_cmd_cred_srng_deinit() - unregister TCL command/credit ring
 *				       from minidump and deinit it
 * @soc: datapath soc handle
 */
static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring) {
		/* Name must match the one used by wlan_minidump_log() in
		 * dp_soc_tcl_cmd_cred_srng_init()
		 */
		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				     soc->tcl_cmd_credit_ring.alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "wbm_desc_rel_ring");
		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
			       TCL_CMD_CREDIT, 0);
	}
}
4533 
4534 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
4535 {
4536 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4537 	uint32_t entries;
4538 	QDF_STATUS status;
4539 
4540 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
4541 	if (soc->init_tcl_cmd_cred_ring) {
4542 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
4543 				       TCL_CMD_CREDIT, entries, 0);
4544 		if (QDF_IS_STATUS_ERROR(status))
4545 			return status;
4546 	}
4547 
4548 	return QDF_STATUS_SUCCESS;
4549 }
4550 
4551 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
4552 {
4553 	if (soc->init_tcl_cmd_cred_ring)
4554 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
4555 }
4556 
4557 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
4558 {
4559 	if (soc->init_tcl_cmd_cred_ring)
4560 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
4561 					    soc->tcl_cmd_credit_ring.hal_srng);
4562 }
4563 #else
/* Stub when WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG: ring not used */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4568 
/* Stub: no TCL command/credit ring to deinit */
static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}
4572 
/* Stub: no TCL command/credit ring memory to allocate */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4577 
/* Stub: no TCL command/credit ring memory to free */
static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}
4581 
/* Stub: no TCL command/credit ring to initialize in HAL */
inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
4585 #endif
4586 
4587 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/*
 * dp_soc_tcl_status_srng_init() - init TCL status ring and register it
 *				   with minidump
 * @soc: datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS or dp_srng_init() error
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* NOTE(review): minidump name "wbm_desc_rel_ring" looks like a
	 * copy-paste from the WBM ring; it matches the name used in the
	 * deinit path, so a rename must update both call sites together.
	 */
	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
			  soc->tcl_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "wbm_desc_rel_ring");

	return QDF_STATUS_SUCCESS;
}
4604 
/*
 * dp_soc_tcl_status_srng_deinit() - unregister TCL status ring from
 *				     minidump and deinit it
 * @soc: datapath soc handle
 */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
	/* Name must match the one used by wlan_minidump_log() in
	 * dp_soc_tcl_status_srng_init()
	 */
	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
			     soc->tcl_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
}
4613 
4614 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
4615 {
4616 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4617 	uint32_t entries;
4618 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4619 
4620 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
4621 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
4622 			       TCL_STATUS, entries, 0);
4623 
4624 	return status;
4625 }
4626 
/* Free TCL status ring memory */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
4631 #else
/* Stub when WLAN_DP_DISABLE_TCL_STATUS_SRNG: ring not used */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4636 
/* Stub: no TCL status ring to deinit */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}
4640 
/* Stub: no TCL status ring memory to allocate */
static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4645 
/* Stub: no TCL status ring memory to free */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
4649 #endif
4650 
4651 /**
4652  * dp_soc_srng_deinit() - de-initialize soc srng rings
4653  * @soc: Datapath soc handle
4654  *
4655  */
4656 void dp_soc_srng_deinit(struct dp_soc *soc)
4657 {
4658 	uint32_t i;
4659 
4660 	if (soc->arch_ops.txrx_soc_srng_deinit)
4661 		soc->arch_ops.txrx_soc_srng_deinit(soc);
4662 
4663 	/* Free the ring memories */
4664 	/* Common rings */
4665 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4666 			     soc->wbm_desc_rel_ring.alloc_size,
4667 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
4668 			     "wbm_desc_rel_ring");
4669 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4670 
4671 	/* Tx data rings */
4672 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4673 		dp_deinit_tx_pair_by_index(soc, i);
4674 
4675 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4676 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4677 		dp_ipa_deinit_alt_tx_ring(soc);
4678 	}
4679 
4680 	/* TCL command and status rings */
4681 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
4682 	dp_soc_tcl_status_srng_deinit(soc);
4683 
4684 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4685 		/* TODO: Get number of rings and ring sizes
4686 		 * from wlan_cfg
4687 		 */
4688 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
4689 				     soc->reo_dest_ring[i].alloc_size,
4690 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
4691 				     "reo_dest_ring");
4692 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
4693 	}
4694 
4695 	/* REO reinjection ring */
4696 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
4697 			     soc->reo_reinject_ring.alloc_size,
4698 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
4699 			     "reo_reinject_ring");
4700 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4701 
4702 	/* Rx release ring */
4703 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
4704 			     soc->rx_rel_ring.alloc_size,
4705 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
4706 			     "reo_release_ring");
4707 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4708 
4709 	/* Rx exception ring */
4710 	/* TODO: Better to store ring_type and ring_num in
4711 	 * dp_srng during setup
4712 	 */
4713 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
4714 			     soc->reo_exception_ring.alloc_size,
4715 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
4716 			     "reo_exception_ring");
4717 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4718 
4719 	/* REO command and status rings */
4720 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
4721 			     soc->reo_cmd_ring.alloc_size,
4722 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
4723 			     "reo_cmd_ring");
4724 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4725 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
4726 			     soc->reo_status_ring.alloc_size,
4727 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
4728 			     "reo_status_ring");
4729 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4730 }
4731 
4732 /**
4733  * dp_soc_srng_init() - Initialize soc level srng rings
4734  * @soc: Datapath soc handle
4735  *
4736  * Return: QDF_STATUS_SUCCESS on success
4737  *	   QDF_STATUS_E_FAILURE on failure
4738  */
4739 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
4740 {
4741 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4742 	uint8_t i;
4743 	uint8_t wbm2_sw_rx_rel_ring_id;
4744 
4745 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4746 
4747 	dp_enable_verbose_debug(soc);
4748 
4749 	/* WBM descriptor release ring */
4750 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
4751 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
4752 		goto fail1;
4753 	}
4754 
4755 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4756 			  soc->wbm_desc_rel_ring.alloc_size,
4757 			  soc->ctrl_psoc,
4758 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
4759 			  "wbm_desc_rel_ring");
4760 
4761 	/* TCL command and status rings */
4762 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
4763 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
4764 		goto fail1;
4765 	}
4766 
4767 	if (dp_soc_tcl_status_srng_init(soc)) {
4768 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
4769 		goto fail1;
4770 	}
4771 
4772 	/* REO reinjection ring */
4773 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
4774 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
4775 		goto fail1;
4776 	}
4777 
4778 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
4779 			  soc->reo_reinject_ring.alloc_size,
4780 			  soc->ctrl_psoc,
4781 			  WLAN_MD_DP_SRNG_REO_REINJECT,
4782 			  "reo_reinject_ring");
4783 
4784 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
4785 	/* Rx release ring */
4786 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4787 			 wbm2_sw_rx_rel_ring_id, 0)) {
4788 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
4789 		goto fail1;
4790 	}
4791 
4792 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
4793 			  soc->rx_rel_ring.alloc_size,
4794 			  soc->ctrl_psoc,
4795 			  WLAN_MD_DP_SRNG_RX_REL,
4796 			  "reo_release_ring");
4797 
4798 	/* Rx exception ring */
4799 	if (dp_srng_init(soc, &soc->reo_exception_ring,
4800 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
4801 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
4802 		goto fail1;
4803 	}
4804 
4805 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
4806 			  soc->reo_exception_ring.alloc_size,
4807 			  soc->ctrl_psoc,
4808 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
4809 			  "reo_exception_ring");
4810 
4811 	/* REO command and status rings */
4812 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
4813 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
4814 		goto fail1;
4815 	}
4816 
4817 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
4818 			  soc->reo_cmd_ring.alloc_size,
4819 			  soc->ctrl_psoc,
4820 			  WLAN_MD_DP_SRNG_REO_CMD,
4821 			  "reo_cmd_ring");
4822 
4823 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
4824 	TAILQ_INIT(&soc->rx.reo_cmd_list);
4825 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
4826 
4827 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
4828 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
4829 		goto fail1;
4830 	}
4831 
4832 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
4833 			  soc->reo_status_ring.alloc_size,
4834 			  soc->ctrl_psoc,
4835 			  WLAN_MD_DP_SRNG_REO_STATUS,
4836 			  "reo_status_ring");
4837 
4838 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4839 		if (dp_init_tx_ring_pair_by_index(soc, i))
4840 			goto fail1;
4841 	}
4842 
4843 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4844 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4845 			goto fail1;
4846 
4847 		if (dp_ipa_init_alt_tx_ring(soc))
4848 			goto fail1;
4849 	}
4850 
4851 	dp_create_ext_stats_event(soc);
4852 
4853 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4854 		/* Initialize REO destination ring */
4855 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
4856 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
4857 			goto fail1;
4858 		}
4859 
4860 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
4861 				  soc->reo_dest_ring[i].alloc_size,
4862 				  soc->ctrl_psoc,
4863 				  WLAN_MD_DP_SRNG_REO_DEST,
4864 				  "reo_dest_ring");
4865 	}
4866 
4867 	if (soc->arch_ops.txrx_soc_srng_init) {
4868 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
4869 			dp_init_err("%pK: dp_srng_init failed for arch rings",
4870 				    soc);
4871 			goto fail1;
4872 		}
4873 	}
4874 
4875 	return QDF_STATUS_SUCCESS;
4876 fail1:
4877 	/*
4878 	 * Cleanup will be done as part of soc_detach, which will
4879 	 * be called on pdev attach failure
4880 	 */
4881 	dp_soc_srng_deinit(soc);
4882 	return QDF_STATUS_E_FAILURE;
4883 }
4884 
4885 /**
4886  * dp_soc_srng_free() - free soc level srng rings
4887  * @soc: Datapath soc handle
4888  *
4889  */
4890 void dp_soc_srng_free(struct dp_soc *soc)
4891 {
4892 	uint32_t i;
4893 
4894 	if (soc->arch_ops.txrx_soc_srng_free)
4895 		soc->arch_ops.txrx_soc_srng_free(soc);
4896 
4897 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
4898 
4899 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4900 		dp_free_tx_ring_pair_by_index(soc, i);
4901 
4902 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
4903 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4904 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4905 		dp_ipa_free_alt_tx_ring(soc);
4906 	}
4907 
4908 	dp_soc_tcl_cmd_cred_srng_free(soc);
4909 	dp_soc_tcl_status_srng_free(soc);
4910 
4911 	for (i = 0; i < soc->num_reo_dest_rings; i++)
4912 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
4913 
4914 	dp_srng_free(soc, &soc->reo_reinject_ring);
4915 	dp_srng_free(soc, &soc->rx_rel_ring);
4916 
4917 	dp_srng_free(soc, &soc->reo_exception_ring);
4918 
4919 	dp_srng_free(soc, &soc->reo_cmd_ring);
4920 	dp_srng_free(soc, &soc->reo_status_ring);
4921 }
4922 
4923 /**
4924  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
4925  * @soc: Datapath soc handle
4926  *
4927  * Return: QDF_STATUS_SUCCESS on success
4928  *	   QDF_STATUS_E_NOMEM on failure
4929  */
4930 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
4931 {
4932 	uint32_t entries;
4933 	uint32_t i;
4934 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4935 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
4936 	uint32_t reo_dst_ring_size;
4937 
4938 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4939 
4940 	/* sw2wbm link descriptor release ring */
4941 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
4942 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
4943 			  entries, 0)) {
4944 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
4945 		goto fail1;
4946 	}
4947 
4948 	/* TCL command and status rings */
4949 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
4950 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
4951 		goto fail1;
4952 	}
4953 
4954 	if (dp_soc_tcl_status_srng_alloc(soc)) {
4955 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
4956 		goto fail1;
4957 	}
4958 
4959 	/* REO reinjection ring */
4960 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
4961 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
4962 			  entries, 0)) {
4963 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
4964 		goto fail1;
4965 	}
4966 
4967 	/* Rx release ring */
4968 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
4969 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4970 			  entries, 0)) {
4971 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
4972 		goto fail1;
4973 	}
4974 
4975 	/* Rx exception ring */
4976 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
4977 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
4978 			  entries, 0)) {
4979 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
4980 		goto fail1;
4981 	}
4982 
4983 	/* REO command and status rings */
4984 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
4985 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
4986 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
4987 		goto fail1;
4988 	}
4989 
4990 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
4991 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
4992 			  entries, 0)) {
4993 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
4994 		goto fail1;
4995 	}
4996 
4997 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
4998 
4999 	/* Disable cached desc if NSS offload is enabled */
5000 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
5001 		cached = 0;
5002 
5003 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
5004 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
5005 			goto fail1;
5006 	}
5007 
5008 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
5009 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5010 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
5011 			goto fail1;
5012 
5013 		if (dp_ipa_alloc_alt_tx_ring(soc))
5014 			goto fail1;
5015 	}
5016 
5017 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
5018 		/* Setup REO destination ring */
5019 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
5020 				  reo_dst_ring_size, cached)) {
5021 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
5022 			goto fail1;
5023 		}
5024 	}
5025 
5026 	if (soc->arch_ops.txrx_soc_srng_alloc) {
5027 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
5028 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
5029 				    soc);
5030 			goto fail1;
5031 		}
5032 	}
5033 
5034 	return QDF_STATUS_SUCCESS;
5035 
5036 fail1:
5037 	dp_soc_srng_free(soc);
5038 	return QDF_STATUS_E_NOMEM;
5039 }
5040 
5041 /**
5042  * dp_soc_cfg_attach() - set target specific configuration in
5043  *			 dp soc cfg.
5044  * @soc: dp soc handle
5045  */
5046 void dp_soc_cfg_attach(struct dp_soc *soc)
5047 {
5048 	int target_type;
5049 	int nss_cfg = 0;
5050 
5051 	target_type = hal_get_target_type(soc->hal_soc);
5052 	switch (target_type) {
5053 	case TARGET_TYPE_QCA6290:
5054 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5055 					       REO_DST_RING_SIZE_QCA6290);
5056 		break;
5057 	case TARGET_TYPE_QCA6390:
5058 	case TARGET_TYPE_QCA6490:
5059 	case TARGET_TYPE_QCA6750:
5060 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5061 					       REO_DST_RING_SIZE_QCA6290);
5062 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5063 		break;
5064 	case TARGET_TYPE_KIWI:
5065 	case TARGET_TYPE_MANGO:
5066 	case TARGET_TYPE_PEACH:
5067 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5068 		break;
5069 	case TARGET_TYPE_QCA8074:
5070 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5071 		break;
5072 	case TARGET_TYPE_QCA8074V2:
5073 	case TARGET_TYPE_QCA6018:
5074 	case TARGET_TYPE_QCA9574:
5075 	case TARGET_TYPE_QCN6122:
5076 	case TARGET_TYPE_QCA5018:
5077 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5078 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5079 		break;
5080 	case TARGET_TYPE_QCN9160:
5081 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5082 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5083 		break;
5084 	case TARGET_TYPE_QCN9000:
5085 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5086 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5087 		break;
5088 	case TARGET_TYPE_QCN9224:
5089 	case TARGET_TYPE_QCA5332:
5090 	case TARGET_TYPE_QCN6432:
5091 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5092 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5093 		break;
5094 	default:
5095 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
5096 		qdf_assert_always(0);
5097 		break;
5098 	}
5099 
5100 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
5101 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
5102 
5103 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
5104 
5105 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
5106 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
5107 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
5108 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
5109 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
5110 		soc->init_tcl_cmd_cred_ring = false;
5111 		soc->num_tcl_data_rings =
5112 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
5113 		soc->num_reo_dest_rings =
5114 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
5115 
5116 	} else {
5117 		soc->init_tcl_cmd_cred_ring = true;
5118 		soc->num_tx_comp_rings =
5119 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
5120 		soc->num_tcl_data_rings =
5121 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
5122 		soc->num_reo_dest_rings =
5123 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
5124 	}
5125 
5126 }
5127 
5128 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
5129 {
5130 	struct dp_soc *soc = pdev->soc;
5131 
5132 	switch (pdev->pdev_id) {
5133 	case 0:
5134 		pdev->reo_dest =
5135 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
5136 		break;
5137 
5138 	case 1:
5139 		pdev->reo_dest =
5140 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
5141 		break;
5142 
5143 	case 2:
5144 		pdev->reo_dest =
5145 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
5146 		break;
5147 
5148 	default:
5149 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
5150 			    soc, pdev->pdev_id);
5151 		break;
5152 	}
5153 }
5154 
5155