xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c (revision 58e80d75bdc3ffa28b0428c0e80529f876b8d53d)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 
60 #ifdef WLAN_FEATURE_STATS_EXT
61 #define INIT_RX_HW_STATS_LOCK(_soc) \
62 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
63 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
64 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
65 #else
66 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
67 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
68 #endif
69 
70 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
71 #define TXCOMP_RING4_NUM 3
72 #else
73 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
74 #endif
75 
76 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
77 						uint8_t index);
78 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
79 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
80 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
81 						 uint8_t index);
82 
/* default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Maps each 6-bit DSCP value to a TID; only the top 3 DSCP bits select the
 * TID, so each TID covers a run of 8 consecutive DSCP values.
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};

/* default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity mapping: 3-bit PCP value maps directly to the same TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};

/* dp_cpu_ring_map - per NSS-config CPU/ring assignment table.
 * Rows are indexed by the NSS offload configuration (DP_NSS_CPU_RING_MAP_MAX
 * entries, plus an extra row under WLAN_TX_PKT_CAPTURE_ENH); columns are
 * indexed by interrupt context (WLAN_CFG_INT_NUM_CONTEXTS_MAX).
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
135 
136 /**
137  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
138  * @soc: DP soc handle
139  * @ring_type: ring type
140  * @ring_num: ring_num
141  *
142  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
143  */
144 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
145 					    enum hal_ring_type ring_type,
146 					    int ring_num)
147 {
148 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
149 	uint8_t status = 0;
150 
151 	switch (ring_type) {
152 	case WBM2SW_RELEASE:
153 	case REO_DST:
154 	case RXDMA_BUF:
155 	case REO_EXCEPTION:
156 		status = ((nss_config) & (1 << ring_num));
157 		break;
158 	default:
159 		break;
160 	}
161 
162 	return status;
163 }
164 
/* MCL specific functions */
#if defined(DP_CON_MON)

#ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With DP_CON_MON_MSI_ENABLED, monitor mode rings are serviced from the
 * regular interrupt context, so the rx mon ring mask configured for this
 * interrupt context is returned (not 0, unlike the polled variant below).
 *
 * Return: rx mon ring mask configured for the given interrupt context
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function is returning 0, since in interrupt mode(softirq based RX),
 * we donot want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would be
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
#endif
211 
212 #else
213 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Non DP_CON_MON builds: monitor rings are serviced in interrupt context,
 * so the configured rx mon ring mask for this context is returned.
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc,
						int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
227 
228 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
229 {
230 	int i;
231 
232 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
233 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
234 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
235 	}
236 }
237 
238 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
239 
/**
 * dp_service_lmac_rings() - timer callback servicing per-LMAC rings
 * @arg: opaque pointer, actually the struct dp_soc
 *
 * Polled servicing of LMAC rings: monitor processing, RXDMA error ring
 * processing for every interrupt context, and RX buffer replenish for
 * rings not offloaded to NSS. Re-arms soc->lmac_reap_timer at the end.
 */
void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip LMAC ids that have no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		/* Reap monitor rings for this mac within the NAPI budget */
		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* Drain the RXDMA error ring from every interrupt context */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Replenish RX buffers only when the host owns the ring
		 * (i.e. it is not offloaded to NSS)
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	/* Re-arm the reap timer for the next poll interval */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
278 
279 #endif
280 
281 /**
282  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
283  * @ring_num: ring num of the ring being queried
284  * @grp_mask: the grp_mask array for the ring type in question.
285  *
286  * The grp_mask array is indexed by group number and the bit fields correspond
287  * to ring numbers.  We are finding which interrupt group a ring belongs to.
288  *
289  * Return: the index in the grp_mask array with the ring number.
290  * -QDF_STATUS_E_NOENT if no entry is found
291  */
292 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
293 {
294 	int ext_group_num;
295 	uint8_t mask = 1 << ring_num;
296 
297 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
298 	     ext_group_num++) {
299 		if (mask & grp_mask[ext_group_num])
300 			return ext_group_num;
301 	}
302 
303 	return -QDF_STATUS_E_NOENT;
304 }
305 
/**
 * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
 * @soc: dp_soc
 * @msi_group_number: MSI group number.
 * @msi_data_count: MSI data count.
 *
 * Return: true if msi_group_number is invalid.
 */
static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
					   int msi_group_number,
					   int msi_data_count)
{
	/* When the platform shares a single MSI across all groups, any
	 * group number is acceptable.
	 */
	if (soc && soc->osdev && soc->osdev->dev &&
	    pld_is_one_msi(soc->osdev->dev))
		return false;

	/* NOTE(review): '>' permits msi_group_number == msi_data_count even
	 * though callers mod the group number by msi_data_count (so that
	 * case also shares an MSI) — confirm '>' rather than '>=' is
	 * intentional.
	 */
	return msi_group_number > msi_data_count;
}
324 
325 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
 *				rx_near_full_grp1 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: non-zero (the ring's bit within the mask, not necessarily 1) if
 *	   ring_num belongs to reo_nf_grp1, 0 otherwise. Callers test
 *	   truthiness only.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
}

/**
 * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
 *				rx_near_full_grp2 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: non-zero (the ring's bit within the mask, not necessarily 1) if
 *	   ring_num belongs to reo_nf_grp2, 0 otherwise. Callers test
 *	   truthiness only.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
}
355 
/**
 * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
 *				ring type and number
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring num
 *
 * Return: near-full irq mask pointer, or NULL when the ring type/number has
 * no near-full interrupt support
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;
	uint8_t *nf_irq_mask = NULL;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		/* Near-full applies only to tx completion rings, not the
		 * rx release ring
		 */
		if (ring_num != wbm2_sw_rx_rel_ring_id) {
			nf_irq_mask = &soc->wlan_cfg_ctx->
					int_tx_ring_near_full_irq_mask[0];
		}
		break;
	case REO_DST:
		/* Every REO dest ring is expected to fall in one of the two
		 * near-full groups; assert otherwise
		 */
		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
		else
			qdf_assert(0);
		break;
	default:
		break;
	}

	return nf_irq_mask;
}
399 
/**
 * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
 * @soc: Datapath SoC handle
 * @ring_params: srng params handle
 * @msi2_addr: MSI2 addr to be set for the SRNG
 * @msi2_data: MSI2 data to be set for the SRNG
 *
 * Return: None
 */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	ring_params->msi2_addr = msi2_addr;
	ring_params->msi2_data = msi2_data;
}

/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number
 *
 * Return: None
 */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	/* Negative group number: the ring is not part of any near-full
	 * ext_group, so clear MSI2 config and bail out.
	 */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Group number exceeding the MSI count means two groups will share
	 * one MSI. Warn and assert (aborts only in debug builds), then
	 * continue with the wrapped (mod msi_data_count) data value.
	 */
	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	/* 64-bit MSI address assembled from the low/high 32-bit words */
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
467 
468 /* Percentage of ring entries considered as nearly full */
469 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
470 /* Percentage of ring entries considered as critically full */
471 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
472 /* Percentage of ring entries considered as safe threshold */
473 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
474 
475 /**
476  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
477  *			near full irq
478  * @soc: Datapath SoC handle
479  * @ring_params: ring params for SRNG
480  * @ring_type: ring type
481  */
482 static inline void
483 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
484 					  struct hal_srng_params *ring_params,
485 					  int ring_type)
486 {
487 	if (ring_params->nf_irq_support) {
488 		ring_params->high_thresh = (ring_params->num_entries *
489 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
490 		ring_params->crit_thresh = (ring_params->num_entries *
491 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
492 		ring_params->safe_thresh = (ring_params->num_entries *
493 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
494 	}
495 }
496 
/**
 * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
 *			structure from the ring params
 * @soc: Datapath SoC handle
 * @srng: SRNG handle
 * @ring_params: ring params for a SRNG
 *
 * Copies the critical and safe thresholds computed into the ring params
 * over to the dp_srng, where later near-full handling can read them.
 *
 * Return: None
 */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	srng->crit_thresh = ring_params->crit_thresh;
	srng->safe_thresh = ring_params->safe_thresh;
}
513 
#else
/* Stub implementations for builds without WLAN_FEATURE_NEAR_FULL_IRQ:
 * no near-full irq mask exists and all msi2/near-full threshold setup
 * becomes a no-op.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif
551 
/**
 * dp_srng_calculate_msi_group() - find the MSI ext_groups (regular and
 *			near-full) that service a given ring
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 * @reg_msi_grp_num: [out] ext_group index for the regular interrupt
 * @nf_irq_support: whether near-full IRQ is supported for this ring
 * @nf_msi_grp_num: [out] ext_group index for the near-full interrupt;
 *		written only when near-full is supported and enabled
 *
 * Return: QDF_STATUS_SUCCESS when a group mask exists for the ring type;
 * -QDF_STATUS_E_NOENT for ring types that raise no host interrupt. Note
 * that on success *reg_msi_grp_num may still be -QDF_STATUS_E_NOENT if the
 * ring is not present in any group's mask.
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
			ring_num = 0;
		}  else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case REO2PPE:
		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
	break;

	case PPE2TCL:
		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
681 
/**
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available, 0 in case of integrated chipsets
 * (or single-pdev builds), or -EINVAL when the MSI assignment cannot be
 * obtained or the interrupt mode is invalid
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif
721 
#if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
/**
 * dp_ipa_vlan_srng_msi_setup() - clear host MSI config on the IPA alternate
 *			REO destination ring
 * @ring_params: srng params being configured
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 *
 * When IPA VLAN support is enabled, the alternate REO destination ring must
 * not raise host MSIs: its msi addr/data are zeroed and the MSI interrupt
 * flag is removed from the ring params.
 */
static void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
	if (wlan_ipa_is_vlan_enabled()) {
		if ((ring_type == REO_DST) &&
				(ring_num == IPA_ALT_REO_DEST_RING_IDX)) {
			ring_params->msi_addr = 0;
			ring_params->msi_data = 0;
			ring_params->flags &= ~HAL_SRNG_MSI_INTR;
		}
	}
}
#else
static inline void
dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
			   int ring_num)
{
}
#endif
743 
/**
 * dp_srng_msi_setup() - populate MSI (and near-full MSI2) ring params for
 *			an SRNG
 * @soc: Datapath SoC handle
 * @srng: SRNG whose interrupts are being configured
 * @ring_params: ring params to populate
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 *
 * Resolves the ext_group servicing the ring, programs the MSI addr/data,
 * optionally registers PPE-DS interrupts, and finally sets up the MSI2
 * (near-full) parameters when supported.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
			      struct hal_srng_params *ring_params,
			      int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;
	int vector;

	/* No MSI assignment available (e.g. integrated target): nothing to
	 * configure.
	 */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type takes no host interrupt at all: clear MSI and MSI2 */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* Ring not found in any group mask: clear MSI but still evaluate
	 * the near-full MSI2 path.
	 */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	/* Group number beyond the MSI count: two groups share one MSI.
	 * Warn and assert (debug builds), then proceed with wrapped data.
	 */
	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* 64-bit MSI address assembled from the low/high 32-bit words */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	/* IPA VLAN alt REO dest ring must not raise host MSIs */
	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);

	/*
	 * During umac reset ppeds interrupts free is not called.
	 * Avoid registering interrupts again.
	 *
	 */
	if (dp_check_umac_reset_in_progress(soc))
		goto configure_msi2;

	if (soc->arch_ops.dp_register_ppeds_interrupts)
		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
							       vector,
							       ring_type,
							       ring_num))
			return;

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
838 
839 /**
840  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
841  * update threshold value from wlan_cfg_ctx
842  * @soc: device handle
843  * @ring_params: per ring specific parameters
844  * @ring_type: Ring type
845  * @ring_num: Ring number for a given ring type
846  * @num_entries: number of entries to fill
847  *
848  * Fill the ring params with the pointer update threshold
849  * configuration parameters available in wlan_cfg_ctx
850  *
851  * Return: None
852  */
853 static void
854 dp_srng_configure_pointer_update_thresholds(
855 				struct dp_soc *soc,
856 				struct hal_srng_params *ring_params,
857 				int ring_type, int ring_num,
858 				int num_entries)
859 {
860 	if (ring_type == REO_DST) {
861 		ring_params->pointer_timer_threshold =
862 			wlan_cfg_get_pointer_timer_threshold_rx(
863 						soc->wlan_cfg_ctx);
864 		ring_params->pointer_num_threshold =
865 			wlan_cfg_get_pointer_num_threshold_rx(
866 						soc->wlan_cfg_ctx);
867 	}
868 }
869 
870 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries to fill
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		/* RX destination rings use the rx timer/batch thresholds */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
		/* rx release ring uses the "other" thresholds */
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		/* all remaining rings use the per-ring-type srng cfg table */
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	/* Low-threshold interrupt is only armed when a threshold is set */
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;

	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
}
920 #else
921 static void
922 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
923 				       struct hal_srng_params *ring_params,
924 				       int ring_type, int ring_num,
925 				       int num_entries)
926 {
927 	uint8_t wbm2_sw_rx_rel_ring_id;
928 	bool rx_refill_lt_disable;
929 
930 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
931 
932 	if (ring_type == REO_DST || ring_type == REO2PPE) {
933 		ring_params->intr_timer_thres_us =
934 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
935 		ring_params->intr_batch_cntr_thres_entries =
936 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
937 	} else if (ring_type == WBM2SW_RELEASE &&
938 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
939 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
940 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
941 		ring_params->intr_timer_thres_us =
942 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
943 		ring_params->intr_batch_cntr_thres_entries =
944 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
945 	} else if (ring_type == RXDMA_BUF) {
946 		rx_refill_lt_disable =
947 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
948 							(soc->wlan_cfg_ctx);
949 		ring_params->intr_timer_thres_us =
950 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
951 
952 		if (!rx_refill_lt_disable) {
953 			ring_params->low_threshold = num_entries >> 3;
954 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
955 			ring_params->intr_batch_cntr_thres_entries = 0;
956 		}
957 	} else {
958 		ring_params->intr_timer_thres_us =
959 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
960 		ring_params->intr_batch_cntr_thres_entries =
961 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
962 	}
963 
964 	/* These rings donot require interrupt to host. Make them zero */
965 	switch (ring_type) {
966 	case REO_REINJECT:
967 	case REO_CMD:
968 	case TCL_DATA:
969 	case TCL_CMD_CREDIT:
970 	case TCL_STATUS:
971 	case WBM_IDLE_LINK:
972 	case SW2WBM_RELEASE:
973 	case SW2RXDMA_NEW:
974 		ring_params->intr_timer_thres_us = 0;
975 		ring_params->intr_batch_cntr_thres_entries = 0;
976 		break;
977 	case PPE2TCL:
978 		ring_params->intr_timer_thres_us =
979 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
980 		ring_params->intr_batch_cntr_thres_entries =
981 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
982 		break;
983 	}
984 
985 	/* Enable low threshold interrupts for rx buffer rings (regular and
986 	 * monitor buffer rings.
987 	 * TODO: See if this is required for any other ring
988 	 */
989 	if ((ring_type == RXDMA_MONITOR_BUF) ||
990 	    (ring_type == RXDMA_MONITOR_STATUS ||
991 	    (ring_type == TX_MONITOR_BUF))) {
992 		/* TODO: Setting low threshold to 1/8th of ring size
993 		 * see if this needs to be configurable
994 		 */
995 		ring_params->low_threshold = num_entries >> 3;
996 		ring_params->intr_timer_thres_us =
997 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
998 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
999 		ring_params->intr_batch_cntr_thres_entries = 0;
1000 	}
1001 
1002 	/* During initialisation monitor rings are only filled with
1003 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1004 	 * a value less than that. Low threshold value is reconfigured again
1005 	 * to 1/8th of the ring size when monitor vap is created.
1006 	 */
1007 	if (ring_type == RXDMA_MONITOR_BUF)
1008 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1009 
	/* In case of PCI chipsets, we don't have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
1015 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1016 	    (soc->intr_mode == DP_INTR_MSI))
1017 		ring_params->intr_batch_cntr_thres_entries = 4;
1018 }
1019 #endif
1020 
1021 #ifdef DISABLE_MON_RING_MSI_CFG
1022 /**
1023  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
1024  * @soc: DP SoC context
1025  * @ring_type: sring type
1026  *
1027  * Return: True if msi cfg should be skipped for srng type else false
1028  */
1029 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1030 {
1031 	if (ring_type == RXDMA_MONITOR_STATUS)
1032 		return true;
1033 
1034 	return false;
1035 }
1036 #else
1037 #ifdef DP_CON_MON_MSI_ENABLED
1038 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
1039 {
1040 	if (soc->cdp_soc.ol_ops->get_con_mode &&
1041 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
1042 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
1043 			return true;
1044 	} else if (ring_type == RXDMA_MONITOR_STATUS &&
1045 		  !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx)) {
1046 		return true;
1047 	}
1048 
1049 	return false;
1050 }
1051 #else
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	/* No skip-config compiled in: MSI cfg is never skipped */
	return false;
}
1056 #endif /* DP_CON_MON_MSI_ENABLED */
1057 #endif /* DISABLE_MON_RING_MSI_CFG */
1058 
/**
 * dp_srng_init_idx() - Initialize an SRNG and program its start index
 * @soc: DP SoC context
 * @srng: SRNG descriptor (ring memory must already be allocated)
 * @ring_type: srng type
 * @ring_num: ring number of the srng type
 * @mac_id: mac id for LMAC rings
 * @idx: ring index to start from
 *
 * Return: QDF_STATUS_SUCCESS on success (also when the ring is already
 * initialized), QDF_STATUS_E_FAILURE when hal_srng_setup_idx() fails
 * (in which case the ring memory is freed).
 */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Re-initialization is not an error; report success and bail out */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		/* Clear MSI params so HAL does not program MSI for this ring */
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
						    ring_type, ring_num,
						    srng->num_entries);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	/* NOTE(review): idle_check mirrors UMAC-reset-in-progress; confirm
	 * exact idle_check semantics against hal_srng_setup_idx().
	 */
	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);

	if (!srng->hal_srng) {
		/* HAL setup failed: release the ring memory */
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
1126 
1127 qdf_export_symbol(dp_srng_init_idx);
1128 
1129 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
1130 				     struct dp_intr *int_ctx,
1131 				     int mac_for_pdev,
1132 				     int total_budget)
1133 {
1134 	uint32_t target_type;
1135 
1136 	target_type = hal_get_target_type(soc->hal_soc);
1137 	if (target_type == TARGET_TYPE_QCN9160)
1138 		return dp_monitor_process(soc, int_ctx,
1139 					  mac_for_pdev, total_budget);
1140 	else
1141 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
1142 					    total_budget);
1143 }
1144 
/**
 * dp_process_lmac_rings() - Process LMAC rings
 * @int_ctx: interrupt context
 * @total_budget: budget of work which can be done
 *
 * Walks every LMAC and services the rx monitor, tx monitor and
 * rxdma2host rings whose mask bits are set in @int_ctx, then performs
 * rx refill and monitor buffer replenish as needed. Stops early once
 * the budget is exhausted.
 *
 * Return: work done
 */
static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done  = 0;
	int budget = total_budget;
	int ring = 0;
	bool rx_refill_lt_disable;

	rx_refill_lt_disable =
		wlan_cfg_get_dp_soc_rxdma_refill_lt_disable(soc->wlan_cfg_ctx);

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		/* Rx monitor ring processing */
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
						       mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Tx monitor ring processing */
		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* rxdma2host destination ring processing */
		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* host2rxdma: replenish the rx refill ring for this mac */
		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			/* Refill ring is per-pdev or per-lmac depending on cfg */
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;

			/* Replenish only when low-threshold refill is enabled */
			if (!rx_refill_lt_disable)
				dp_rx_buffers_lt_replenish_simple(soc,
							  mac_for_pdev,
							  rx_refill_buf_ring,
							  rx_desc_pool,
							  0,
							  &desc_list,
							  &tail);
		}
	}

	/* Monitor buffer refills are done outside the per-LMAC budget loop */
	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	return total_budget - budget;
}
1246 
1247 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1248 /**
1249  * dp_service_near_full_srngs() - Bottom half handler to process the near
1250  *				full IRQ on a SRNG
1251  * @dp_ctx: Datapath SoC handle
1252  * @dp_budget: Number of SRNGs which can be processed in a single attempt
1253  *		without rescheduling
1254  * @cpu: cpu id
1255  *
1256  * Return: remaining budget/quota for the soc device
1257  */
1258 static
1259 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1260 {
1261 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1262 	struct dp_soc *soc = int_ctx->soc;
1263 
1264 	/*
1265 	 * dp_service_near_full_srngs arch ops should be initialized always
1266 	 * if the NEAR FULL IRQ feature is enabled.
1267 	 */
1268 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
1269 							dp_budget);
1270 }
1271 #endif
1272 
1273 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1274 
/**
 * dp_service_srngs() - Top level bottom-half handler servicing all SRNGs
 * mapped to one interrupt context
 * @dp_ctx: interrupt context (struct dp_intr *)
 * @dp_budget: total work budget for this invocation
 * @cpu: cpu id on which the handler runs
 *
 * Services rings in priority order: tx completion, REO exception,
 * rx WBM release, REO destination, REO status, then LMAC rings.
 * Returns as soon as the budget is exhausted.
 *
 * Return: amount of budget consumed
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	/* Mark this context as actively servicing rings on @cpu */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring: completions of REO commands issued by the host */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the vdev timer is not
	 * driving monitor reap.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	/* Invoke the FW notification callback, if one is registered */
	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
1413 
1414 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1415 
1416 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
1417 {
1418 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1419 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1420 	struct dp_soc *soc = int_ctx->soc;
1421 	uint32_t remaining_quota = dp_budget;
1422 	uint32_t work_done  = 0;
1423 	int budget = dp_budget;
1424 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1425 
1426 	if (reo_status_mask) {
1427 		if (dp_reo_status_ring_handler(int_ctx, soc))
1428 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1429 	}
1430 
1431 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
1432 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1433 		if (work_done) {
1434 			budget -=  work_done;
1435 			if (budget <= 0)
1436 				goto budget_done;
1437 			remaining_quota = budget;
1438 		}
1439 	}
1440 
1441 	qdf_lro_flush(int_ctx->lro_ctx);
1442 	intr_stats->num_masks++;
1443 
1444 budget_done:
1445 	return dp_budget - budget;
1446 }
1447 
1448 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1449 
1450 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1451 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
1452 					struct dp_intr *intr_ctx)
1453 {
1454 	if (intr_ctx->rx_mon_ring_mask)
1455 		return true;
1456 
1457 	return false;
1458 }
1459 #else
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	/* Event history disabled: monitor mask is never treated as valid */
	return false;
}
1465 #endif
1466 
/**
 * dp_soc_attach_poll() - Configure interrupt contexts for poll mode
 * @txrx_soc: CDP SoC handle
 *
 * Fills every interrupt context's ring masks from wlan_cfg, initializes
 * LRO contexts, builds the monitor intr-id to lmac map and arms the
 * poll timer (dp_interrupt_timer) that drives servicing in this mode.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;
	int lmac_id = 0;

	/* Invalidate the intr-id to lmac map before rebuilding it below */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		/* Contexts with a monitor mask get event history and a slot
		 * in the intr-id to lmac map.
		 */
		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
1509 
1510 void dp_soc_set_interrupt_mode(struct dp_soc *soc)
1511 {
1512 	uint32_t msi_base_data, msi_vector_start;
1513 	int msi_vector_count, ret;
1514 
1515 	soc->intr_mode = DP_INTR_INTEGRATED;
1516 
1517 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1518 	    (dp_is_monitor_mode_using_poll(soc) &&
1519 	     soc->cdp_soc.ol_ops->get_con_mode &&
1520 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
1521 		soc->intr_mode = DP_INTR_POLL;
1522 	} else {
1523 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1524 						  &msi_vector_count,
1525 						  &msi_base_data,
1526 						  &msi_vector_start);
1527 		if (ret)
1528 			return;
1529 
1530 		soc->intr_mode = DP_INTR_MSI;
1531 	}
1532 }
1533 
1534 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
1535 /**
1536  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
1537  * Calculate interrupt map for legacy interrupts
1538  * @soc: DP soc handle
1539  * @intr_ctx_num: Interrupt context number
1540  * @irq_id_map: IRQ map
1541  * @num_irq_r: Number of interrupts assigned for this context
1542  *
1543  * Return: void
1544  */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	/* Side effect: record the selected interrupt mode on the soc */
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* For each set mask bit j, the virtual IRQ id is derived by
	 * walking backwards ("base - j") from the ring-0 IRQ id.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
		/* NOTE(review): no "- j" offset here, unlike the other
		 * rings -- confirm this is intentional.
		 */
		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = sw2txmon_src_ring;
		if (txmon2host_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (txmon2sw_p0_dest0 - j);
	}
	*num_irq_r = num_irq;
}
1601 #else
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	/* Legacy PCI interrupts not compiled in: no IRQ ids are mapped */
}
1608 #endif
1609 
/**
 * dp_soc_interrupt_map_calculate_integrated() - Calculate interrupt map
 * for integrated (SoC) interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	/* Side effect: record the selected interrupt mode on the soc */
	soc->intr_mode = DP_INTR_INTEGRATED;

	/* Per-ring IRQ ids are derived by walking back ("base - j") from
	 * the ring-1 IRQ id for each set mask bit j.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Rx monitor needs three IRQ ids: ppdu end, status and
		 * destination rings.
		 */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = host2tx_monitor_ring1;

		if (txmon2host_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(txmon2host_monitor_destination_mac1 - j);
		}

	}
	*num_irq_r = num_irq;
}
1697 
/**
 * dp_soc_interrupt_map_calculate_msi() - Calculate interrupt map for
 * MSI interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned for this context
 * @msi_vector_count: number of MSI vectors available to DP
 * @msi_vector_start: first MSI vector assigned to DP
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	/* MSI vectors are distributed round-robin across intr contexts */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	/* Side effect: record the selected interrupt mode on the soc */
	soc->intr_mode = DP_INTR_MSI;

	/* All rings mapped to this context share one MSI vector, so at
	 * most a single IRQ id is emitted.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
1751 
1752 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1753 				    int *irq_id_map, int *num_irq)
1754 {
1755 	int msi_vector_count, ret;
1756 	uint32_t msi_base_data, msi_vector_start;
1757 
1758 	if (pld_get_enable_intx(soc->osdev->dev)) {
1759 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
1760 				intr_ctx_num, irq_id_map, num_irq);
1761 	}
1762 
1763 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1764 					  &msi_vector_count,
1765 					  &msi_base_data,
1766 					  &msi_vector_start);
1767 	if (ret)
1768 		return dp_soc_interrupt_map_calculate_integrated(soc,
1769 				intr_ctx_num, irq_id_map, num_irq);
1770 
1771 	else
1772 		dp_soc_interrupt_map_calculate_msi(soc,
1773 				intr_ctx_num, irq_id_map, num_irq,
1774 				msi_vector_count, msi_vector_start);
1775 }
1776 
1777 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1778 /**
1779  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
1780  * @soc: DP soc handle
1781  * @num_irq: IRQ number
1782  * @irq_id_map: IRQ map
1783  * @intr_id: interrupt context ID
1784  *
1785  * Return: 0 for success. nonzero for failure.
1786  */
1787 static inline int
1788 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
1789 				  int irq_id_map[], int intr_id)
1790 {
1791 	return hif_register_ext_group(soc->hif_handle,
1792 				      num_irq, irq_id_map,
1793 				      dp_service_near_full_srngs,
1794 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
1795 				      HIF_EXEC_NAPI_TYPE,
1796 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1797 }
1798 #else
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	/* Near-full IRQ feature disabled: nothing to attach, report success */
	return 0;
}
1805 #endif
1806 
1807 #ifdef DP_CON_MON_MSI_SKIP_SET
1808 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
1809 {
1810 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
1811 		 QDF_GLOBAL_MONITOR_MODE &&
1812 		 !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx));
1813 }
1814 #else
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	/* Skip logic not compiled in: always keep the rx mon ring mask */
	return false;
}
1819 #endif
1820 
/**
 * dp_soc_interrupt_detach() - Tear down DP interrupt registration
 * @txrx_soc: CDP SoC handle
 *
 * Frees the poll timer or deregisters the hif exec groups (regular and
 * near-full), clears all ring masks in every interrupt context, releases
 * LRO contexts and invalidates the monitor intr-id to lmac map.
 */
void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_free(&soc->int_timer);
	} else {
		/* Detach both the regular and near-full exec groups */
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
	}

	/* Zero every mask so no further ring servicing is attempted */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
		soc->intr_ctx[i].tx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2txmon_ring_mask = 0;
		soc->intr_ctx[i].umac_reset_intr_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}
1859 
/**
 * dp_soc_interrupt_attach() - Register handlers for all DP interrupt contexts
 * @txrx_soc: DP SOC handle
 *
 * For each configured interrupt context: collect the per-ring interrupt
 * masks from the SOC cfg, program them into soc->intr_ctx[], derive the
 * IRQ id map, and register the context with HIF — either through the
 * near-full handler (for contexts owning near-full masks) or as a regular
 * NAPI ext group. Also records which contexts service the REO exception
 * ring and the monitor rings.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if ext group
 *	   registration fails (all contexts are detached again in that case)
 */
QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	int i = 0;
	int num_irq = 0;
	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
	int lmac_id = 0;
	int napi_scale;

	/* Invalidate the monitor intr-ctx -> lmac map; valid entries are
	 * repopulated below as monitor-capable contexts are found.
	 */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		/* Per-ring interrupt masks owned by this context, as
		 * configured in the SOC cfg context.
		 */
		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int tx_mon_ring_mask =
			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_mon_ring_mask =
			wlan_cfg_get_host2rxdma_mon_ring_mask(
				soc->wlan_cfg_ctx, i);
		int rx_near_full_grp_1_mask =
			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
							     i);
		int rx_near_full_grp_2_mask =
			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
							     i);
		int tx_ring_near_full_mask =
			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
							    i);
		int host2txmon_ring_mask =
			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
		int umac_reset_intr_mask =
			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);

		if (dp_skip_rx_mon_ring_mask_set(soc))
			rx_mon_mask = 0;

		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
			 host2rxdma_mon_ring_mask;
		soc->intr_ctx[i].rx_near_full_grp_1_mask =
						rx_near_full_grp_1_mask;
		soc->intr_ctx[i].rx_near_full_grp_2_mask =
						rx_near_full_grp_2_mask;
		soc->intr_ctx[i].tx_ring_near_full_mask =
						tx_ring_near_full_mask;
		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;

		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		/* Contexts that own any near-full mask get the dedicated
		 * near-full attach path; everything else registers as a
		 * regular NAPI ext group with the configured scale factor.
		 */
		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
		    tx_ring_near_full_mask) {
			dp_soc_near_full_interrupt_attach(soc, num_irq,
							  irq_id_map, i);
		} else {
			napi_scale = wlan_cfg_get_napi_scale_factor(
							    soc->wlan_cfg_ctx);
			if (!napi_scale)
				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;

			ret = hif_register_ext_group(soc->hif_handle,
				num_irq, irq_id_map, dp_service_srngs,
				&soc->intr_ctx[i], "dp_intr",
				HIF_EXEC_NAPI_TYPE, napi_scale);
		}

		/* NOTE(review): irq_id_map[1] may be uninitialized when
		 * num_irq < 2 — debug print only, but worth confirming.
		 */
		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
			 i, num_irq, irq_id_map[0], irq_id_map[1]);

		/* ret is only set by the ext-group registration path above;
		 * the near-full path does not report failure here.
		 */
		if (ret) {
			dp_init_err("%pK: failed, ret = %d", soc, ret);
			dp_soc_interrupt_detach(txrx_soc);
			return QDF_STATUS_E_FAILURE;
		}

		hif_event_history_init(soc->hif_handle, i);
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		/* Remember one context that services the REO exception ring */
		if (rx_err_ring_mask)
			rx_err_ring_intr_ctxt_id = i;

		/* Record which intr context services each monitor-capable
		 * lmac, in discovery order.
		 */
		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);
	/* Clear CPU affinity for the context servicing the REO exception
	 * ring.
	 */
	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
						  rx_err_ring_intr_ctxt_id, 0);

	return QDF_STATUS_SUCCESS;
}
1989 
/* Per-client traffic averages used to size the common link descriptor
 * pool in dp_hw_link_desc_pool_banks_alloc() — sizing heuristics, not
 * hard limits.
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
1995 
1996 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
1997 {
1998 	struct qdf_mem_multi_page_t *pages;
1999 
2000 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2001 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
2002 	} else {
2003 		pages = &soc->link_desc_pages;
2004 	}
2005 
2006 	if (!pages) {
2007 		dp_err("can not get link desc pages");
2008 		QDF_ASSERT(0);
2009 		return;
2010 	}
2011 
2012 	if (pages->dma_pages) {
2013 		wlan_minidump_remove((void *)
2014 				     pages->dma_pages->page_v_addr_start,
2015 				     pages->num_pages * pages->page_size,
2016 				     soc->ctrl_psoc,
2017 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2018 				     "hw_link_desc_bank");
2019 		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
2020 					     pages, 0, false);
2021 	}
2022 }
2023 
2024 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
2025 
2026 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2027 {
2028 	hal_soc_handle_t hal_soc = soc->hal_soc;
2029 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2030 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2031 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2032 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2033 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2034 	uint32_t num_mpdu_links_per_queue_desc =
2035 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2036 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2037 	uint32_t *total_link_descs, total_mem_size;
2038 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2039 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2040 	uint32_t num_entries;
2041 	struct qdf_mem_multi_page_t *pages;
2042 	struct dp_srng *dp_srng;
2043 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2044 
2045 	/* Only Tx queue descriptors are allocated from common link descriptor
2046 	 * pool Rx queue descriptors are not included in this because (REO queue
2047 	 * extension descriptors) they are expected to be allocated contiguously
2048 	 * with REO queue descriptors
2049 	 */
2050 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2051 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
2052 		/* dp_monitor_get_link_desc_pages returns NULL only
2053 		 * if monitor SOC is  NULL
2054 		 */
2055 		if (!pages) {
2056 			dp_err("can not get link desc pages");
2057 			QDF_ASSERT(0);
2058 			return QDF_STATUS_E_FAULT;
2059 		}
2060 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2061 		num_entries = dp_srng->alloc_size /
2062 			hal_srng_get_entrysize(soc->hal_soc,
2063 					       RXDMA_MONITOR_DESC);
2064 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
2065 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2066 			      MINIDUMP_STR_SIZE);
2067 	} else {
2068 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2069 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2070 
2071 		num_mpdu_queue_descs = num_mpdu_link_descs /
2072 			num_mpdu_links_per_queue_desc;
2073 
2074 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2075 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2076 			num_msdus_per_link_desc;
2077 
2078 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2079 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2080 
2081 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2082 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2083 
2084 		pages = &soc->link_desc_pages;
2085 		total_link_descs = &soc->total_link_descs;
2086 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2087 			      MINIDUMP_STR_SIZE);
2088 	}
2089 
2090 	/* If link descriptor banks are allocated, return from here */
2091 	if (pages->num_pages)
2092 		return QDF_STATUS_SUCCESS;
2093 
2094 	/* Round up to power of 2 */
2095 	*total_link_descs = 1;
2096 	while (*total_link_descs < num_entries)
2097 		*total_link_descs <<= 1;
2098 
2099 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
2100 		     soc, *total_link_descs, link_desc_size);
2101 	total_mem_size =  *total_link_descs * link_desc_size;
2102 	total_mem_size += link_desc_align;
2103 
2104 	dp_init_info("%pK: total_mem_size: %d",
2105 		     soc, total_mem_size);
2106 
2107 	dp_set_max_page_size(pages, max_alloc_size);
2108 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
2109 				      pages,
2110 				      link_desc_size,
2111 				      *total_link_descs,
2112 				      0, false);
2113 	if (!pages->num_pages) {
2114 		dp_err("Multi page alloc fail for hw link desc pool");
2115 		return QDF_STATUS_E_FAULT;
2116 	}
2117 
2118 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
2119 			  pages->num_pages * pages->page_size,
2120 			  soc->ctrl_psoc,
2121 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2122 			  "hw_link_desc_bank");
2123 
2124 	return QDF_STATUS_SUCCESS;
2125 }
2126 
2127 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2128 {
2129 	uint32_t i;
2130 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2131 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2132 	qdf_dma_addr_t paddr;
2133 
2134 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2135 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2136 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2137 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2138 			if (vaddr) {
2139 				qdf_mem_free_consistent(soc->osdev,
2140 							soc->osdev->dev,
2141 							size,
2142 							vaddr,
2143 							paddr,
2144 							0);
2145 				vaddr = NULL;
2146 			}
2147 		}
2148 	} else {
2149 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2150 				     soc->wbm_idle_link_ring.alloc_size,
2151 				     soc->ctrl_psoc,
2152 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2153 				     "wbm_idle_link_ring");
2154 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2155 	}
2156 }
2157 
/**
 * dp_hw_link_desc_ring_alloc() - Allocate the backing for the WBM idle
 *				  link descriptor list
 * @soc: DP SOC handle
 *
 * If the whole idle list fits inside a single allocation it is placed in
 * the WBM idle link SRNG; otherwise it is spread across multiple
 * DMA-coherent scatter buffers.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;

	/* Ring mode: the idle list fits in one SRNG allocation */
	if (total_mem_size <= max_alloc_size) {
		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
			dp_init_err("%pK: Link desc idle ring setup failed",
				    soc);
			goto fail;
		}

		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				  soc->wbm_idle_link_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				  "wbm_idle_link_ring");
	} else {
		/* Scatter-buffer mode: split the list across coherent bufs */
		uint32_t num_scatter_bufs;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		/* NOTE(review): return value discarded here; entries per
		 * buffer is recomputed in dp_link_desc_ring_replenish() —
		 * confirm this call is vestigial with no needed side effect.
		 */
		hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
					soc->hal_soc, total_mem_size,
					soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			buf_size = soc->wbm_idle_scatter_buf_size;
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 buf_size,
							 baseaddr);

			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}
		soc->num_scatter_bufs = num_scatter_bufs;
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Release any scatter buffers allocated before the failure */
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

		if (vaddr) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						soc->wbm_idle_scatter_buf_size,
						vaddr,
						paddr, 0);
			vaddr = NULL;
		}
	}
	return QDF_STATUS_E_NOMEM;
}

qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
2243 
2244 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2245 {
2246 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2247 
2248 	if (dp_srng->base_vaddr_unaligned) {
2249 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2250 			return QDF_STATUS_E_FAILURE;
2251 	}
2252 	return QDF_STATUS_SUCCESS;
2253 }
2254 
/* Deinitialize the WBM idle link SRNG (counterpart of ring_init above) */
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
2259 
/**
 * dp_link_desc_ring_replenish() - Hand all link descriptors to HW
 * @soc: DP SOC handle
 * @mac_id: monitor mac id, or WLAN_INVALID_PDEV_ID for the common pool
 *
 * Zeroes the descriptor pages, then writes one (cookie, physical address)
 * pair per descriptor either into the destination SRNG (WBM idle link
 * ring / RXDMA monitor desc ring) or, when no ring exists, into the idle
 * list scatter buffers, which are then registered with HW.
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	uint32_t desc_id = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	/* Select the descriptor pages and destination ring: the common
	 * pool feeds the WBM idle link ring, a monitor mac feeds its
	 * RXDMA monitor desc ring.
	 */
	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	/* Zero every descriptor page before handing it to HW */
	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		/* Ring mode: one link descriptor pointer per SRNG entry,
		 * until either the ring or the pool is exhausted.
		 */
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			/* desc_id wraps per page; combined with page_idx
			 * into the cookie below
			 */
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size),
					       soc->idle_link_bm_id);
			count++;
			desc_id++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size),
					       soc->idle_link_bm_id);
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* Current scatter buffer full; advance to
				 * the next one
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
			desc_id++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
}

qdf_export_symbol(dp_link_desc_ring_replenish);
2391 
#ifdef IPA_OFFLOAD
/* Number of REO destination rings reserved for IPA Rx offload */
#define USE_1_IPA_RX_REO_RING 1
#define USE_2_IPA_RX_REO_RINGS 2
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation builds use minimal REO ring sizes */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2404 #ifdef IPA_WDI3_TX_TWO_PIPES
#ifdef DP_MEMORY_OPT
/* With DP_MEMORY_OPT the IPA alternate Tx ring pair is allocated,
 * initialized and torn down on demand through these hooks.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

#else /* !DP_MEMORY_OPT */
/* Without DP_MEMORY_OPT the alternate Tx rings are managed together with
 * the regular soc rings, so these hooks are no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
#endif /* DP_MEMORY_OPT */
2445 
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	/* Program HAL for the IPA alternate TCL data ring */
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
2451 
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single IPA Tx pipe: the alternate Tx ring does not exist, so every
 * alt-ring hook is a no-op.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_WDI3_TX_TWO_PIPES */
2476 
#else

#define REO_DST_RING_SIZE_QCA6290 1024

/* IPA offload disabled: all IPA alt-Tx-ring hooks are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_OFFLOAD */
2504 
2505 /**
2506  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2507  * @soc: Datapath soc handler
2508  *
2509  * This api resets the default cpu ring map
2510  */
2511 void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2512 {
2513 	uint8_t i;
2514 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2515 
2516 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2517 		switch (nss_config) {
2518 		case dp_nss_cfg_first_radio:
2519 			/*
2520 			 * Setting Tx ring map for one nss offloaded radio
2521 			 */
2522 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2523 			break;
2524 
2525 		case dp_nss_cfg_second_radio:
2526 			/*
2527 			 * Setting Tx ring for two nss offloaded radios
2528 			 */
2529 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2530 			break;
2531 
2532 		case dp_nss_cfg_dbdc:
2533 			/*
2534 			 * Setting Tx ring map for 2 nss offloaded radios
2535 			 */
2536 			soc->tx_ring_map[i] =
2537 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2538 			break;
2539 
2540 		case dp_nss_cfg_dbtc:
2541 			/*
2542 			 * Setting Tx ring map for 3 nss offloaded radios
2543 			 */
2544 			soc->tx_ring_map[i] =
2545 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2546 			break;
2547 
2548 		default:
2549 			dp_err("tx_ring_map failed due to invalid nss cfg");
2550 			break;
2551 		}
2552 	}
2553 }
2554 
2555 /**
2556  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2557  *					  unused WMAC hw rings
2558  * @soc: DP Soc handle
2559  * @mac_num: wmac num
2560  *
2561  * Return: Return void
2562  */
2563 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2564 						int mac_num)
2565 {
2566 	uint8_t *grp_mask = NULL;
2567 	int group_number;
2568 
2569 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2570 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2571 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2572 					  group_number, 0x0);
2573 
2574 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2575 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2576 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2577 				      group_number, 0x0);
2578 
2579 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2580 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2581 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2582 					  group_number, 0x0);
2583 
2584 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2585 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2586 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2587 					      group_number, 0x0);
2588 }
2589 
2590 #ifdef IPA_OFFLOAD
2591 #ifdef IPA_WDI3_VLAN_SUPPORT
2592 /**
2593  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
2594  *                                     ring for vlan tagged traffic
2595  * @soc: DP Soc handle
2596  *
2597  * Return: Return void
2598  */
2599 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
2600 {
2601 	uint8_t *grp_mask = NULL;
2602 	int group_number, mask;
2603 
2604 	if (!wlan_ipa_is_vlan_enabled())
2605 		return;
2606 
2607 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2608 
2609 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
2610 	if (group_number < 0) {
2611 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
2612 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
2613 		return;
2614 	}
2615 
2616 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2617 
2618 	/* reset the interrupt mask for offloaded ring */
2619 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
2620 
2621 	/*
2622 	 * set the interrupt mask to zero for rx offloaded radio.
2623 	 */
2624 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2625 }
#else
/* IPA vlan support disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_WDI3_VLAN_SUPPORT */
#else
/* IPA offload disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_OFFLOAD */
2636 
/**
 * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP Soc handle
 *
 * Clears, per ring type (tx completion, REO dest, Rx refill, REO
 * exception), the interrupt-mask bit of every ring that is NSS
 * offloaded, so the host no longer services interrupts for those rings.
 *
 * Return: Return void
 */
void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* Skip rings that are neither offloaded nor enabled */
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* Skip rings that are neither offloaded nor enabled */
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			/* NOTE(review): message reports REO_DST although this
			 * loop handles RXDMA_BUF refill rings — confirm the
			 * ring type in the log is intended.
			 */
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
							  group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
						  group_number, mask);
	}

	/* REO exception (rx err) ring: one mask entry per REO dest ring */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
2775 
2776 #ifdef IPA_OFFLOAD
2777 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
2778 			 uint32_t *remap1, uint32_t *remap2)
2779 {
2780 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
2781 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
2782 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
2783 
2784 	switch (soc->arch_id) {
2785 	case CDP_ARCH_TYPE_BE:
2786 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2787 					      soc->num_reo_dest_rings -
2788 					      USE_2_IPA_RX_REO_RINGS, remap1,
2789 					      remap2);
2790 		break;
2791 
2792 	case CDP_ARCH_TYPE_LI:
2793 		if (wlan_ipa_is_vlan_enabled()) {
2794 			hal_compute_reo_remap_ix2_ix3(
2795 					soc->hal_soc, ring,
2796 					soc->num_reo_dest_rings -
2797 					USE_2_IPA_RX_REO_RINGS, remap1,
2798 					remap2);
2799 
2800 		} else {
2801 			hal_compute_reo_remap_ix2_ix3(
2802 					soc->hal_soc, ring,
2803 					soc->num_reo_dest_rings -
2804 					USE_1_IPA_RX_REO_RING, remap1,
2805 					remap2);
2806 		}
2807 
2808 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
2809 		break;
2810 	default:
2811 		dp_err("unknown arch_id 0x%x", soc->arch_id);
2812 		QDF_BUG(0);
2813 	}
2814 
2815 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2816 
2817 	return true;
2818 }
2819 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* Identify the IPA alternate Tx / Tx-completion ring indices */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single Tx pipe: there is no alternate ring to match */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
2841 
2842 /**
2843  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
2844  *
2845  * @tx_ring_num: Tx ring number
2846  * @tx_ipa_ring_sz: Return param only updated for IPA.
2847  * @soc_cfg_ctx: dp soc cfg context
2848  *
2849  * Return: None
2850  */
2851 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
2852 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2853 {
2854 	if (!soc_cfg_ctx->ipa_enabled)
2855 		return;
2856 
2857 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
2858 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
2859 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
2860 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
2861 }
2862 
2863 /**
2864  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
2865  *
2866  * @tx_comp_ring_num: Tx comp ring number
2867  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
2868  * @soc_cfg_ctx: dp soc cfg context
2869  *
2870  * Return: None
2871  */
2872 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
2873 					 int *tx_comp_ipa_ring_sz,
2874 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
2875 {
2876 	if (!soc_cfg_ctx->ipa_enabled)
2877 		return;
2878 
2879 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
2880 		*tx_comp_ipa_ring_sz =
2881 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
2882 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
2883 		*tx_comp_ipa_ring_sz =
2884 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
2885 }
2886 #else
2887 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
2888 {
2889 	uint8_t num = 0;
2890 
2891 	switch (value) {
2892 	/* should we have all the different possible ring configs */
2893 	case 0xFF:
2894 		num = 8;
2895 		ring[0] = REO_REMAP_SW1;
2896 		ring[1] = REO_REMAP_SW2;
2897 		ring[2] = REO_REMAP_SW3;
2898 		ring[3] = REO_REMAP_SW4;
2899 		ring[4] = REO_REMAP_SW5;
2900 		ring[5] = REO_REMAP_SW6;
2901 		ring[6] = REO_REMAP_SW7;
2902 		ring[7] = REO_REMAP_SW8;
2903 		break;
2904 
2905 	case 0x3F:
2906 		num = 6;
2907 		ring[0] = REO_REMAP_SW1;
2908 		ring[1] = REO_REMAP_SW2;
2909 		ring[2] = REO_REMAP_SW3;
2910 		ring[3] = REO_REMAP_SW4;
2911 		ring[4] = REO_REMAP_SW5;
2912 		ring[5] = REO_REMAP_SW6;
2913 		break;
2914 
2915 	case 0xF:
2916 		num = 4;
2917 		ring[0] = REO_REMAP_SW1;
2918 		ring[1] = REO_REMAP_SW2;
2919 		ring[2] = REO_REMAP_SW3;
2920 		ring[3] = REO_REMAP_SW4;
2921 		break;
2922 	case 0xE:
2923 		num = 3;
2924 		ring[0] = REO_REMAP_SW2;
2925 		ring[1] = REO_REMAP_SW3;
2926 		ring[2] = REO_REMAP_SW4;
2927 		break;
2928 	case 0xD:
2929 		num = 3;
2930 		ring[0] = REO_REMAP_SW1;
2931 		ring[1] = REO_REMAP_SW3;
2932 		ring[2] = REO_REMAP_SW4;
2933 		break;
2934 	case 0xC:
2935 		num = 2;
2936 		ring[0] = REO_REMAP_SW3;
2937 		ring[1] = REO_REMAP_SW4;
2938 		break;
2939 	case 0xB:
2940 		num = 3;
2941 		ring[0] = REO_REMAP_SW1;
2942 		ring[1] = REO_REMAP_SW2;
2943 		ring[2] = REO_REMAP_SW4;
2944 		break;
2945 	case 0xA:
2946 		num = 2;
2947 		ring[0] = REO_REMAP_SW2;
2948 		ring[1] = REO_REMAP_SW4;
2949 		break;
2950 	case 0x9:
2951 		num = 2;
2952 		ring[0] = REO_REMAP_SW1;
2953 		ring[1] = REO_REMAP_SW4;
2954 		break;
2955 	case 0x8:
2956 		num = 1;
2957 		ring[0] = REO_REMAP_SW4;
2958 		break;
2959 	case 0x7:
2960 		num = 3;
2961 		ring[0] = REO_REMAP_SW1;
2962 		ring[1] = REO_REMAP_SW2;
2963 		ring[2] = REO_REMAP_SW3;
2964 		break;
2965 	case 0x6:
2966 		num = 2;
2967 		ring[0] = REO_REMAP_SW2;
2968 		ring[1] = REO_REMAP_SW3;
2969 		break;
2970 	case 0x5:
2971 		num = 2;
2972 		ring[0] = REO_REMAP_SW1;
2973 		ring[1] = REO_REMAP_SW3;
2974 		break;
2975 	case 0x4:
2976 		num = 1;
2977 		ring[0] = REO_REMAP_SW3;
2978 		break;
2979 	case 0x3:
2980 		num = 2;
2981 		ring[0] = REO_REMAP_SW1;
2982 		ring[1] = REO_REMAP_SW2;
2983 		break;
2984 	case 0x2:
2985 		num = 1;
2986 		ring[0] = REO_REMAP_SW2;
2987 		break;
2988 	case 0x1:
2989 		num = 1;
2990 		ring[0] = REO_REMAP_SW1;
2991 		break;
2992 	default:
2993 		dp_err("unknown reo ring map 0x%x", value);
2994 		QDF_BUG(0);
2995 	}
2996 	return num;
2997 }
2998 
2999 bool dp_reo_remap_config(struct dp_soc *soc,
3000 			 uint32_t *remap0,
3001 			 uint32_t *remap1,
3002 			 uint32_t *remap2)
3003 {
3004 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3005 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3006 	uint8_t num;
3007 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
3008 	uint32_t value;
3009 
3010 	switch (offload_radio) {
3011 	case dp_nss_cfg_default:
3012 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
3013 		num = dp_reo_ring_selection(value, ring);
3014 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3015 					      num, remap1, remap2);
3016 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
3017 
3018 		break;
3019 	case dp_nss_cfg_first_radio:
3020 		value = reo_config & 0xE;
3021 		num = dp_reo_ring_selection(value, ring);
3022 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3023 					      num, remap1, remap2);
3024 
3025 		break;
3026 	case dp_nss_cfg_second_radio:
3027 		value = reo_config & 0xD;
3028 		num = dp_reo_ring_selection(value, ring);
3029 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3030 					      num, remap1, remap2);
3031 
3032 		break;
3033 	case dp_nss_cfg_dbdc:
3034 	case dp_nss_cfg_dbtc:
3035 		/* return false if both or all are offloaded to NSS */
3036 		return false;
3037 	}
3038 
3039 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3040 		 *remap1, *remap2, offload_radio);
3041 	return true;
3042 }
3043 
/* IPA disabled: keep the configured TCL ring size unchanged.
 * Parameter renamed to tx_ring_num for consistency with the IPA variant.
 */
static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
3048 
/* IPA disabled: keep the configured Tx completion ring size unchanged */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
3054 #endif /* IPA_OFFLOAD */
3055 
3056 /**
3057  * dp_reo_frag_dst_set() - configure reo register to set the
3058  *                        fragment destination ring
3059  * @soc: Datapath soc
3060  * @frag_dst_ring: output parameter to set fragment destination ring
3061  *
3062  * Based on offload_radio below fragment destination rings is selected
3063  * 0 - TCL
3064  * 1 - SW1
3065  * 2 - SW2
3066  * 3 - SW3
3067  * 4 - SW4
3068  * 5 - Release
3069  * 6 - FW
3070  * 7 - alternate select
3071  *
3072  * Return: void
3073  */
3074 void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3075 {
3076 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3077 
3078 	switch (offload_radio) {
3079 	case dp_nss_cfg_default:
3080 		*frag_dst_ring = REO_REMAP_TCL;
3081 		break;
3082 	case dp_nss_cfg_first_radio:
3083 		/*
3084 		 * This configuration is valid for single band radio which
3085 		 * is also NSS offload.
3086 		 */
3087 	case dp_nss_cfg_dbdc:
3088 	case dp_nss_cfg_dbtc:
3089 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3090 		break;
3091 	default:
3092 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
3093 		break;
3094 	}
3095 }
3096 
#ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to signal rx_hw_stats query completion */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
/* Stats-ext feature disabled: nothing to create */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
3107 
/**
 * dp_deinit_tx_pair_by_index() - Deinit the TCL data / WBM completion
 * ring pair previously initialized for the given index
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Return: None
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	/* Remove from minidump before the ring memory goes away */
	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* No SW completion ring mapped to this index; nothing more to do */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
3142 
3143 /**
3144  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
3145  * ring pair
3146  * @soc: DP soc pointer
3147  * @index: index of soc->tcl_data or soc->tx_comp to initialize
3148  *
3149  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3150  */
3151 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3152 						uint8_t index)
3153 {
3154 	int tcl_ring_num, wbm_ring_num;
3155 	uint8_t bm_id;
3156 
3157 	if (index >= MAX_TCL_DATA_RINGS) {
3158 		dp_err("unexpected index!");
3159 		QDF_BUG(0);
3160 		goto fail1;
3161 	}
3162 
3163 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
3164 						index,
3165 						&tcl_ring_num,
3166 						&wbm_ring_num);
3167 
3168 	if (tcl_ring_num == -1) {
3169 		dp_err("incorrect tcl ring num for index %u", index);
3170 		goto fail1;
3171 	}
3172 
3173 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
3174 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
3175 			 tcl_ring_num, 0)) {
3176 		dp_err("dp_srng_init failed for tcl_data_ring");
3177 		goto fail1;
3178 	}
3179 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3180 			  soc->tcl_data_ring[index].alloc_size,
3181 			  soc->ctrl_psoc,
3182 			  WLAN_MD_DP_SRNG_TCL_DATA,
3183 			  "tcl_data_ring");
3184 
3185 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
3186 		goto set_rbm;
3187 
3188 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3189 			 wbm_ring_num, 0)) {
3190 		dp_err("dp_srng_init failed for tx_comp_ring");
3191 		goto fail1;
3192 	}
3193 
3194 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3195 			  soc->tx_comp_ring[index].alloc_size,
3196 			  soc->ctrl_psoc,
3197 			  WLAN_MD_DP_SRNG_TX_COMP,
3198 			  "tcl_comp_ring");
3199 set_rbm:
3200 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
3201 
3202 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
3203 
3204 	return QDF_STATUS_SUCCESS;
3205 
3206 fail1:
3207 	return QDF_STATUS_E_FAILURE;
3208 }
3209 
/**
 * dp_free_tx_ring_pair_by_index() - free the TCL data and WBM completion
 * rings allocated for the given index
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Return: None
 */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
3216 
3217 /**
3218  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
3219  * ring pair for the given "index"
3220  * @soc: DP soc pointer
3221  * @index: index of soc->tcl_data or soc->tx_comp to initialize
3222  *
3223  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
3224  */
3225 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3226 						 uint8_t index)
3227 {
3228 	int tx_ring_size;
3229 	int tx_comp_ring_size;
3230 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3231 	int cached = 0;
3232 
3233 	if (index >= MAX_TCL_DATA_RINGS) {
3234 		dp_err("unexpected index!");
3235 		QDF_BUG(0);
3236 		goto fail1;
3237 	}
3238 
3239 	dp_debug("index %u", index);
3240 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3241 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
3242 
3243 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3244 			  tx_ring_size, cached)) {
3245 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3246 		goto fail1;
3247 	}
3248 
3249 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3250 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
3251 	/* Enable cached TCL desc if NSS offload is disabled */
3252 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3253 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3254 
3255 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
3256 	    INVALID_WBM_RING_NUM)
3257 		return QDF_STATUS_SUCCESS;
3258 
3259 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3260 			  tx_comp_ring_size, cached)) {
3261 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3262 		goto fail1;
3263 	}
3264 
3265 	return QDF_STATUS_SUCCESS;
3266 
3267 fail1:
3268 	return QDF_STATUS_E_FAILURE;
3269 }
3270 
3271 /**
3272  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
3273  * @pdev: DP_PDEV handle
3274  *
3275  * Return: void
3276  */
3277 void
3278 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3279 {
3280 	uint8_t map_id;
3281 	struct dp_soc *soc = pdev->soc;
3282 
3283 	if (!soc)
3284 		return;
3285 
3286 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3287 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3288 			     default_dscp_tid_map,
3289 			     sizeof(default_dscp_tid_map));
3290 	}
3291 
3292 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3293 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3294 					default_dscp_tid_map,
3295 					map_id);
3296 	}
3297 }
3298 
3299 /**
3300  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
3301  * @pdev: DP_PDEV handle
3302  *
3303  * Return: void
3304  */
3305 void
3306 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3307 {
3308 	struct dp_soc *soc = pdev->soc;
3309 
3310 	if (!soc)
3311 		return;
3312 
3313 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3314 		     sizeof(default_pcp_tid_map));
3315 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3316 }
3317 
/**
 * dp_reo_desc_freelist_destroy() - drain and destroy the REO desc freelist
 * @soc: DP SOC handle
 *
 * Unmaps and frees every queued REO HW queue descriptor, then destroys
 * the list and its lock. Static inline unless DP_UMAC_HW_RESET_SUPPORT
 * is defined, in which case it has external linkage.
 *
 * Return: None
 */
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* DMA-unmap the HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3341 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Mark the list usable only after lock and list are set up */
	soc->reo_desc_deferred_freelist_init = true;
}
3357 
3358 /**
3359  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
3360  *                                           free the leftover REO QDESCs
3361  * @soc: Datapath soc handle
3362  *
3363  * Return: void
3364  */
3365 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
3366 {
3367 	struct reo_desc_deferred_freelist_node *desc;
3368 
3369 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
3370 	soc->reo_desc_deferred_freelist_init = false;
3371 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
3372 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3373 		qdf_mem_unmap_nbytes_single(soc->osdev,
3374 					    desc->hw_qdesc_paddr,
3375 					    QDF_DMA_BIDIRECTIONAL,
3376 					    desc->hw_qdesc_alloc_size);
3377 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
3378 		qdf_mem_free(desc);
3379 	}
3380 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
3381 
3382 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
3383 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
3384 }
3385 #else
/* Deferred REO qdesc destroy disabled: nothing to create */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}
3389 
/* Deferred REO qdesc destroy disabled: nothing to destroy */
static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
3393 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
3394 
3395 /**
3396  * dp_soc_reset_txrx_ring_map() - reset tx ring map
3397  * @soc: DP SOC handle
3398  *
3399  */
3400 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
3401 {
3402 	uint32_t i;
3403 
3404 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
3405 		soc->tx_ring_map[i] = 0;
3406 }
3407 
3408 /**
3409  * dp_soc_deinit() - Deinitialize txrx SOC
3410  * @txrx_soc: Opaque DP SOC handle
3411  *
3412  * Return: None
3413  */
3414 void dp_soc_deinit(void *txrx_soc)
3415 {
3416 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3417 	struct htt_soc *htt_soc = soc->htt_handle;
3418 
3419 	dp_monitor_soc_deinit(soc);
3420 
3421 	/* free peer tables & AST tables allocated during peer_map_attach */
3422 	if (soc->peer_map_attach_success) {
3423 		dp_peer_find_detach(soc);
3424 		soc->arch_ops.txrx_peer_map_detach(soc);
3425 		soc->peer_map_attach_success = FALSE;
3426 	}
3427 
3428 	qdf_flush_work(&soc->htt_stats.work);
3429 	qdf_disable_work(&soc->htt_stats.work);
3430 
3431 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3432 
3433 	dp_soc_reset_txrx_ring_map(soc);
3434 
3435 	dp_reo_desc_freelist_destroy(soc);
3436 	dp_reo_desc_deferred_freelist_destroy(soc);
3437 
3438 	DEINIT_RX_HW_STATS_LOCK(soc);
3439 
3440 	qdf_spinlock_destroy(&soc->ast_lock);
3441 
3442 	dp_peer_mec_spinlock_destroy(soc);
3443 
3444 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3445 
3446 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
3447 
3448 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3449 
3450 	qdf_spinlock_destroy(&soc->vdev_map_lock);
3451 
3452 	dp_reo_cmdlist_destroy(soc);
3453 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3454 
3455 	dp_soc_tx_desc_sw_pools_deinit(soc);
3456 
3457 	dp_soc_srng_deinit(soc);
3458 
3459 	dp_hw_link_desc_ring_deinit(soc);
3460 
3461 	dp_soc_print_inactive_objects(soc);
3462 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
3463 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
3464 
3465 	htt_soc_htc_dealloc(soc->htt_handle);
3466 
3467 	htt_soc_detach(htt_soc);
3468 
3469 	/* Free wbm sg list and reset flags in down path */
3470 	dp_rx_wbm_sg_list_deinit(soc);
3471 
3472 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
3473 			     WLAN_MD_DP_SOC, "dp_soc");
3474 }
3475 
3476 #ifdef QCA_HOST2FW_RXBUF_RING
3477 void
3478 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
3479 				int lmac_id)
3480 {
3481 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
3482 		htt_srng_setup(soc->htt_handle, mac_id,
3483 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
3484 			       RXDMA_DST);
3485 }
3486 #endif
3487 
3488 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
3489 				  enum cdp_host_reo_dest_ring *reo_dest,
3490 				  bool *hash_based)
3491 {
3492 	struct dp_soc *soc;
3493 	struct dp_pdev *pdev;
3494 
3495 	pdev = vdev->pdev;
3496 	soc = pdev->soc;
3497 	/*
3498 	 * hash based steering is disabled for Radios which are offloaded
3499 	 * to NSS
3500 	 */
3501 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3502 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3503 
3504 	/*
3505 	 * Below line of code will ensure the proper reo_dest ring is chosen
3506 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3507 	 */
3508 	*reo_dest = pdev->reo_dest;
3509 }
3510 
3511 #ifdef IPA_OFFLOAD
3512 /**
3513  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
3514  * @vdev: Virtual device
3515  *
3516  * Return: true if the vdev is of subtype P2P
3517  *	   false if the vdev is of any other subtype
3518  */
3519 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
3520 {
3521 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
3522 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
3523 	    vdev->subtype == wlan_op_subtype_p2p_go)
3524 		return true;
3525 
3526 	return false;
3527 }
3528 
3529 /**
3530  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
3531  * @vdev: Datapath VDEV handle
3532  * @setup_info:
3533  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
3534  * @hash_based: pointer to hash value (enabled/disabled) to be populated
3535  * @lmac_peer_id_msb:
3536  *
3537  * If IPA is enabled in ini, for SAP mode, disable hash based
3538  * steering, use default reo_dst ring for RX. Use config values for other modes.
3539  *
3540  * Return: None
3541  */
3542 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
3543 				       struct cdp_peer_setup_info *setup_info,
3544 				       enum cdp_host_reo_dest_ring *reo_dest,
3545 				       bool *hash_based,
3546 				       uint8_t *lmac_peer_id_msb)
3547 {
3548 	struct dp_soc *soc;
3549 	struct dp_pdev *pdev;
3550 
3551 	pdev = vdev->pdev;
3552 	soc = pdev->soc;
3553 
3554 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
3555 
3556 	/* For P2P-GO interfaces we do not need to change the REO
3557 	 * configuration even if IPA config is enabled
3558 	 */
3559 	if (dp_is_vdev_subtype_p2p(vdev))
3560 		return;
3561 
3562 	/*
3563 	 * If IPA is enabled, disable hash-based flow steering and set
3564 	 * reo_dest_ring_4 as the REO ring to receive packets on.
3565 	 * IPA is configured to reap reo_dest_ring_4.
3566 	 *
3567 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
3568 	 * value enum value is from 1 - 4.
3569 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
3570 	 */
3571 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3572 		if (dp_ipa_is_mdm_platform()) {
3573 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
3574 			if (vdev->opmode == wlan_op_mode_ap)
3575 				*hash_based = 0;
3576 		} else {
3577 			dp_debug("opt_dp: default HOST reo ring is set");
3578 		}
3579 	}
3580 }
3581 
3582 #else
3583 
3584 /**
3585  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
3586  * @vdev: Datapath VDEV handle
3587  * @setup_info:
3588  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
3589  * @hash_based: pointer to hash value (enabled/disabled) to be populated
3590  * @lmac_peer_id_msb:
3591  *
3592  * Use system config values for hash based steering.
3593  * Return: None
3594  */
3595 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
3596 				       struct cdp_peer_setup_info *setup_info,
3597 				       enum cdp_host_reo_dest_ring *reo_dest,
3598 				       bool *hash_based,
3599 				       uint8_t *lmac_peer_id_msb)
3600 {
3601 	struct dp_soc *soc = vdev->pdev->soc;
3602 
3603 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
3604 					lmac_peer_id_msb);
3605 }
3606 #endif /* IPA_OFFLOAD */
3607 
3608 /**
3609  * dp_peer_setup_wifi3() - initialize the peer
3610  * @soc_hdl: soc handle object
3611  * @vdev_id: vdev_id of vdev object
3612  * @peer_mac: Peer's mac address
3613  * @setup_info: peer setup info for MLO
3614  *
3615  * Return: QDF_STATUS
3616  */
3617 QDF_STATUS
3618 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3619 		    uint8_t *peer_mac,
3620 		    struct cdp_peer_setup_info *setup_info)
3621 {
3622 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3623 	struct dp_pdev *pdev;
3624 	bool hash_based = 0;
3625 	enum cdp_host_reo_dest_ring reo_dest;
3626 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3627 	struct dp_vdev *vdev = NULL;
3628 	struct dp_peer *peer =
3629 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
3630 					       DP_MOD_ID_CDP);
3631 	struct dp_peer *mld_peer = NULL;
3632 	enum wlan_op_mode vdev_opmode;
3633 	uint8_t lmac_peer_id_msb = 0;
3634 
3635 	if (!peer)
3636 		return QDF_STATUS_E_FAILURE;
3637 
3638 	vdev = peer->vdev;
3639 	if (!vdev) {
3640 		status = QDF_STATUS_E_FAILURE;
3641 		goto fail;
3642 	}
3643 
3644 	/* save vdev related member in case vdev freed */
3645 	vdev_opmode = vdev->opmode;
3646 	pdev = vdev->pdev;
3647 	dp_peer_setup_get_reo_hash(vdev, setup_info,
3648 				   &reo_dest, &hash_based,
3649 				   &lmac_peer_id_msb);
3650 
3651 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
3652 					   peer, vdev, vdev->vdev_id,
3653 					   setup_info);
3654 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
3655 		"hash-based-steering:%d default-reo_dest:%u",
3656 		pdev->pdev_id, vdev->vdev_id,
3657 		vdev->opmode, peer,
3658 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
3659 
3660 	/*
3661 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
3662 	 * i.e both the devices have same MAC address. In these
3663 	 * cases we want such pkts to be processed in NULL Q handler
3664 	 * which is REO2TCL ring. for this reason we should
3665 	 * not setup reo_queues and default route for bss_peer.
3666 	 */
3667 	if (!IS_MLO_DP_MLD_PEER(peer))
3668 		dp_monitor_peer_tx_init(pdev, peer);
3669 
3670 	if (!setup_info)
3671 		if (dp_peer_legacy_setup(soc, peer) !=
3672 				QDF_STATUS_SUCCESS) {
3673 			status = QDF_STATUS_E_RESOURCES;
3674 			goto fail;
3675 		}
3676 
3677 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
3678 		status = QDF_STATUS_E_FAILURE;
3679 		goto fail;
3680 	}
3681 
3682 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3683 		/* TODO: Check the destination ring number to be passed to FW */
3684 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3685 				soc->ctrl_psoc,
3686 				peer->vdev->pdev->pdev_id,
3687 				peer->mac_addr.raw,
3688 				peer->vdev->vdev_id, hash_based, reo_dest,
3689 				lmac_peer_id_msb);
3690 	}
3691 
3692 	qdf_atomic_set(&peer->is_default_route_set, 1);
3693 
3694 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
3695 	if (QDF_IS_STATUS_ERROR(status)) {
3696 		dp_peer_err("peer mlo setup failed");
3697 		qdf_assert_always(0);
3698 	}
3699 
3700 	if (vdev_opmode != wlan_op_mode_monitor) {
3701 		/* In case of MLD peer, switch peer to mld peer and
3702 		 * do peer_rx_init.
3703 		 */
3704 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
3705 		    IS_MLO_DP_LINK_PEER(peer)) {
3706 			if (setup_info && setup_info->is_first_link) {
3707 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
3708 				if (mld_peer)
3709 					dp_peer_rx_init(pdev, mld_peer);
3710 				else
3711 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
3712 			}
3713 		} else {
3714 			dp_peer_rx_init(pdev, peer);
3715 		}
3716 	}
3717 
3718 	if (!IS_MLO_DP_MLD_PEER(peer))
3719 		dp_peer_ppdu_delayed_ba_init(peer);
3720 
3721 fail:
3722 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3723 	return status;
3724 }
3725 
3726 /**
3727  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
3728  * @txrx_soc: cdp soc handle
3729  * @ac: Access category
3730  * @value: timeout value in millisec
3731  *
3732  * Return: void
3733  */
3734 void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3735 			     uint8_t ac, uint32_t value)
3736 {
3737 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3738 
3739 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
3740 }
3741 
3742 /**
3743  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
3744  * @txrx_soc: cdp soc handle
3745  * @ac: access category
3746  * @value: timeout value in millisec
3747  *
3748  * Return: void
3749  */
3750 void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
3751 			     uint8_t ac, uint32_t *value)
3752 {
3753 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3754 
3755 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
3756 }
3757 
3758 /**
3759  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3760  * @txrx_soc: cdp soc handle
3761  * @pdev_id: id of physical device object
3762  * @val: reo destination ring index (1 - 4)
3763  *
3764  * Return: QDF_STATUS
3765  */
3766 QDF_STATUS
3767 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
3768 		     enum cdp_host_reo_dest_ring val)
3769 {
3770 	struct dp_pdev *pdev =
3771 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3772 						   pdev_id);
3773 
3774 	if (pdev) {
3775 		pdev->reo_dest = val;
3776 		return QDF_STATUS_SUCCESS;
3777 	}
3778 
3779 	return QDF_STATUS_E_FAILURE;
3780 }
3781 
3782 /**
3783  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3784  * @txrx_soc: cdp soc handle
3785  * @pdev_id: id of physical device object
3786  *
3787  * Return: reo destination ring index
3788  */
3789 enum cdp_host_reo_dest_ring
3790 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
3791 {
3792 	struct dp_pdev *pdev =
3793 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
3794 						   pdev_id);
3795 
3796 	if (pdev)
3797 		return pdev->reo_dest;
3798 	else
3799 		return cdp_host_reo_dest_ring_unknown;
3800 }
3801 
/**
 * dp_rx_bar_stats_cb() - REO command callback to collect BAR Rx stats
 * @soc: DP SOC handle
 * @cb_ctxt: callback context - the dp_pdev the stats query was issued for
 * @reo_status: REO queue status returned by the command engine
 *
 * Accumulates the BAR received count into pdev Rx stats and flags the
 * stats command as complete.
 *
 * Return: None
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (!dp_check_pdev_exists(soc, pdev)) {
		dp_err_rl("pdev doesn't exist");
		return;
	}

	/*
	 * NOTE(review): stats_cmd_complete is not set on the early
	 * returns above/below; presumably the waiter times out - confirm.
	 */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d",
			       queue_status->header.status);
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}
3826 
3827 /**
3828  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
3829  * @soc: dp soc.
3830  * @pdev: dp pdev.
3831  *
3832  * Return: None.
3833  */
3834 void
3835 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
3836 {
3837 	uint32_t hw_head;
3838 	uint32_t hw_tail;
3839 	struct dp_srng *srng;
3840 
3841 	if (!soc) {
3842 		dp_err("soc is NULL");
3843 		return;
3844 	}
3845 
3846 	if (!pdev) {
3847 		dp_err("pdev is NULL");
3848 		return;
3849 	}
3850 
3851 	srng = &pdev->soc->wbm_idle_link_ring;
3852 	if (!srng) {
3853 		dp_err("wbm_idle_link_ring srng is NULL");
3854 		return;
3855 	}
3856 
3857 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
3858 			&hw_tail, WBM_IDLE_LINK);
3859 
3860 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
3861 		 hw_head, hw_tail);
3862 }
3863 
3864 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Override the per-poll Tx completion and Rx reap packet limits */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
3871 
3872 #else
3873 
3874 static inline
3875 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
3876 			       uint32_t rx_limit)
3877 {
3878 }
3879 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
3880 
3881 /**
3882  * dp_display_srng_info() - Dump the srng HP TP info
3883  * @soc_hdl: CDP Soc handle
3884  *
3885  * This function dumps the SW hp/tp values for the important rings.
3886  * HW hp/tp values are not being dumped, since it can lead to
3887  * READ NOC error when UMAC is in low power state. MCC does not have
3888  * device force wake working yet.
3889  *
3890  * Return: none
3891  */
3892 void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
3893 {
3894 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3895 	hal_soc_handle_t hal_soc = soc->hal_soc;
3896 	uint32_t hp, tp, i;
3897 
3898 	dp_info("SRNG HP-TP data:");
3899 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3900 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
3901 				&tp, &hp);
3902 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3903 
3904 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
3905 		    INVALID_WBM_RING_NUM)
3906 			continue;
3907 
3908 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
3909 				&tp, &hp);
3910 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3911 	}
3912 
3913 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3914 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
3915 				&tp, &hp);
3916 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
3917 	}
3918 
3919 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
3920 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
3921 
3922 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
3923 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
3924 
3925 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
3926 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
3927 }
3928 
3929 /**
3930  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
3931  * @psoc: dp soc handle
3932  * @pdev_id: id of DP_PDEV handle
3933  * @pcp: pcp value
3934  * @tid: tid value passed by the user
3935  *
3936  * Return: QDF_STATUS_SUCCESS on success
3937  */
3938 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
3939 					 uint8_t pdev_id,
3940 					 uint8_t pcp, uint8_t tid)
3941 {
3942 	struct dp_soc *soc = (struct dp_soc *)psoc;
3943 
3944 	soc->pcp_tid_map[pcp] = tid;
3945 
3946 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
3947 	return QDF_STATUS_SUCCESS;
3948 }
3949 
3950 /**
3951  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
3952  * @soc_hdl: DP soc handle
3953  * @vdev_id: id of DP_VDEV handle
3954  * @pcp: pcp value
3955  * @tid: tid value passed by the user
3956  *
3957  * Return: QDF_STATUS_SUCCESS on success
3958  */
3959 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
3960 					 uint8_t vdev_id,
3961 					 uint8_t pcp, uint8_t tid)
3962 {
3963 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3964 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3965 						     DP_MOD_ID_CDP);
3966 
3967 	if (!vdev)
3968 		return QDF_STATUS_E_FAILURE;
3969 
3970 	vdev->pcp_tid_map[pcp] = tid;
3971 
3972 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3973 	return QDF_STATUS_SUCCESS;
3974 }
3975 
3976 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
3977 void dp_drain_txrx(struct cdp_soc_t *soc_handle)
3978 {
3979 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3980 	uint32_t cur_tx_limit, cur_rx_limit;
3981 	uint32_t budget = 0xffff;
3982 	uint32_t val;
3983 	int i;
3984 	int cpu = dp_srng_get_cpu();
3985 
3986 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
3987 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
3988 
3989 	/* Temporarily increase soft irq limits when going to drain
3990 	 * the UMAC/LMAC SRNGs and restore them after polling.
3991 	 * Though the budget is on higher side, the TX/RX reaping loops
3992 	 * will not execute longer as both TX and RX would be suspended
3993 	 * by the time this API is called.
3994 	 */
3995 	dp_update_soft_irq_limits(soc, budget, budget);
3996 
3997 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
3998 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
3999 
4000 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
4001 
4002 	/* Do a dummy read at offset 0; this will ensure all
4003 	 * pendings writes(HP/TP) are flushed before read returns.
4004 	 */
4005 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
4006 	dp_debug("Register value at offset 0: %u", val);
4007 }
4008 #endif
4009 
4010 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
4011 /**
4012  * dp_flush_ring_hptp() - Update ring shadow
4013  *			  register HP/TP address when runtime
4014  *                        resume
4015  * @soc: DP soc context
4016  * @hal_srng: srng
4017  *
4018  * Return: None
4019  */
4020 static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
4021 {
4022 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
4023 						 HAL_SRNG_FLUSH_EVENT)) {
4024 		/* Acquire the lock */
4025 		hal_srng_access_start(soc->hal_soc, hal_srng);
4026 
4027 		hal_srng_access_end(soc->hal_soc, hal_srng);
4028 
4029 		hal_srng_set_flush_last_ts(hal_srng);
4030 
4031 		dp_debug("flushed");
4032 	}
4033 }
4034 
4035 void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
4036 {
4037 	 uint8_t i;
4038 
4039 	if (force_flush_tx) {
4040 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4041 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
4042 					   HAL_SRNG_FLUSH_EVENT);
4043 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
4044 		}
4045 
4046 		return;
4047 	}
4048 
4049 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4050 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
4051 
4052 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
4053 }
4054 #endif
4055 
4056 #ifdef WLAN_FEATURE_STATS_EXT
4057 /* rx hw stats event wait timeout in ms */
4058 #define DP_REO_STATUS_STATS_TIMEOUT 100
4059 
4060 /**
4061  * dp_rx_hw_stats_cb() - request rx hw stats response callback
4062  * @soc: soc handle
4063  * @cb_ctxt: callback context
4064  * @reo_status: reo command response status
4065  *
4066  * Return: None
4067  */
4068 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4069 			      union hal_reo_status *reo_status)
4070 {
4071 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
4072 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
4073 	bool is_query_timeout;
4074 
4075 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4076 	is_query_timeout = rx_hw_stats->is_query_timeout;
4077 	/* free the cb_ctxt if all pending tid stats query is received */
4078 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
4079 		if (!is_query_timeout) {
4080 			qdf_event_set(&soc->rx_hw_stats_event);
4081 			soc->is_last_stats_ctx_init = false;
4082 		}
4083 
4084 		qdf_mem_free(rx_hw_stats);
4085 	}
4086 
4087 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4088 		dp_info("REO stats failure %d",
4089 			queue_status->header.status);
4090 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4091 		return;
4092 	}
4093 
4094 	if (!is_query_timeout) {
4095 		soc->ext_stats.rx_mpdu_received +=
4096 					queue_status->mpdu_frms_cnt;
4097 		soc->ext_stats.rx_mpdu_missed +=
4098 					queue_status->hole_cnt;
4099 	}
4100 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4101 }
4102 
4103 /**
4104  * dp_request_rx_hw_stats() - request rx hardware stats
4105  * @soc_hdl: soc handle
4106  * @vdev_id: vdev id
4107  *
4108  * Return: None
4109  */
4110 QDF_STATUS
4111 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
4112 {
4113 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4114 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4115 						     DP_MOD_ID_CDP);
4116 	struct dp_peer *peer = NULL;
4117 	QDF_STATUS status;
4118 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
4119 	int rx_stats_sent_cnt = 0;
4120 	uint32_t last_rx_mpdu_received;
4121 	uint32_t last_rx_mpdu_missed;
4122 
4123 	if (!vdev) {
4124 		dp_err("vdev is null for vdev_id: %u", vdev_id);
4125 		status = QDF_STATUS_E_INVAL;
4126 		goto out;
4127 	}
4128 
4129 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
4130 
4131 	if (!peer) {
4132 		dp_err("Peer is NULL");
4133 		status = QDF_STATUS_E_INVAL;
4134 		goto out;
4135 	}
4136 
4137 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
4138 
4139 	if (!rx_hw_stats) {
4140 		dp_err("malloc failed for hw stats structure");
4141 		status = QDF_STATUS_E_INVAL;
4142 		goto out;
4143 	}
4144 
4145 	qdf_event_reset(&soc->rx_hw_stats_event);
4146 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4147 	/* save the last soc cumulative stats and reset it to 0 */
4148 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
4149 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
4150 	soc->ext_stats.rx_mpdu_received = 0;
4151 	soc->ext_stats.rx_mpdu_missed = 0;
4152 
4153 	dp_debug("HW stats query start");
4154 	rx_stats_sent_cnt =
4155 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
4156 	if (!rx_stats_sent_cnt) {
4157 		dp_err("no tid stats sent successfully");
4158 		qdf_mem_free(rx_hw_stats);
4159 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4160 		status = QDF_STATUS_E_INVAL;
4161 		goto out;
4162 	}
4163 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
4164 		       rx_stats_sent_cnt);
4165 	rx_hw_stats->is_query_timeout = false;
4166 	soc->is_last_stats_ctx_init = true;
4167 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4168 
4169 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
4170 				       DP_REO_STATUS_STATS_TIMEOUT);
4171 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
4172 
4173 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
4174 	if (status != QDF_STATUS_SUCCESS) {
4175 		dp_info("partial rx hw stats event collected with %d",
4176 			qdf_atomic_read(
4177 				&rx_hw_stats->pending_tid_stats_cnt));
4178 		if (soc->is_last_stats_ctx_init)
4179 			rx_hw_stats->is_query_timeout = true;
4180 		/*
4181 		 * If query timeout happened, use the last saved stats
4182 		 * for this time query.
4183 		 */
4184 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
4185 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
4186 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
4187 
4188 	}
4189 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
4190 
4191 out:
4192 	if (peer)
4193 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
4194 	if (vdev)
4195 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
4196 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
4197 
4198 	return status;
4199 }
4200 
4201 /**
4202  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
4203  * @soc_hdl: soc handle
4204  *
4205  * Return: None
4206  */
4207 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
4208 {
4209 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4210 
4211 	soc->ext_stats.rx_mpdu_received = 0;
4212 	soc->ext_stats.rx_mpdu_missed = 0;
4213 }
4214 #endif /* WLAN_FEATURE_STATS_EXT */
4215 
4216 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
4217 {
4218 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
4219 
4220 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
4221 }
4222 
4223 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
4224 {
4225 	uint32_t i;
4226 
4227 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4228 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
4229 	}
4230 }
4231 
4232 qdf_export_symbol(dp_soc_set_txrx_ring_map);
4233 
/**
 * dp_soc_cfg_dump() - dump target specific soc configuration
 * @soc: dp soc handle
 * @target_type: target type id returned by hal_get_target_type()
 *
 * Return: None
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d da_war_enabled = %d",
		     soc->ast_override_support, soc->da_war_enabled);

	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
4242 
4243 /**
4244  * dp_soc_cfg_init() - initialize target specific configuration
4245  *		       during dp_soc_init
4246  * @soc: dp soc handle
4247  */
4248 static void dp_soc_cfg_init(struct dp_soc *soc)
4249 {
4250 	uint32_t target_type;
4251 
4252 	target_type = hal_get_target_type(soc->hal_soc);
4253 	switch (target_type) {
4254 	case TARGET_TYPE_QCA6290:
4255 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4256 					       REO_DST_RING_SIZE_QCA6290);
4257 		soc->ast_override_support = 1;
4258 		soc->da_war_enabled = false;
4259 		break;
4260 	case TARGET_TYPE_QCA6390:
4261 	case TARGET_TYPE_QCA6490:
4262 	case TARGET_TYPE_QCA6750:
4263 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4264 					       REO_DST_RING_SIZE_QCA6290);
4265 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4266 		soc->ast_override_support = 1;
4267 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4268 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4269 		    QDF_GLOBAL_MONITOR_MODE) {
4270 			int int_ctx;
4271 
4272 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
4273 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4274 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4275 			}
4276 		}
4277 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4278 		break;
4279 	case TARGET_TYPE_KIWI:
4280 	case TARGET_TYPE_MANGO:
4281 	case TARGET_TYPE_PEACH:
4282 		soc->ast_override_support = 1;
4283 		soc->per_tid_basize_max_tid = 8;
4284 
4285 		if (soc->cdp_soc.ol_ops->get_con_mode &&
4286 		    soc->cdp_soc.ol_ops->get_con_mode() ==
4287 		    QDF_GLOBAL_MONITOR_MODE) {
4288 			int int_ctx;
4289 
4290 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
4291 			     int_ctx++) {
4292 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
4293 				if (dp_is_monitor_mode_using_poll(soc))
4294 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
4295 			}
4296 		}
4297 
4298 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4299 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
4300 		break;
4301 	case TARGET_TYPE_QCA8074:
4302 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
4303 		soc->da_war_enabled = true;
4304 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4305 		break;
4306 	case TARGET_TYPE_QCA8074V2:
4307 	case TARGET_TYPE_QCA6018:
4308 	case TARGET_TYPE_QCA9574:
4309 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4310 		soc->ast_override_support = 1;
4311 		soc->per_tid_basize_max_tid = 8;
4312 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4313 		soc->da_war_enabled = false;
4314 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4315 		break;
4316 	case TARGET_TYPE_QCN9000:
4317 		soc->ast_override_support = 1;
4318 		soc->da_war_enabled = false;
4319 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4320 		soc->per_tid_basize_max_tid = 8;
4321 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4322 		soc->lmac_polled_mode = 0;
4323 		soc->wbm_release_desc_rx_sg_support = 1;
4324 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
4325 		break;
4326 	case TARGET_TYPE_QCA5018:
4327 	case TARGET_TYPE_QCN6122:
4328 	case TARGET_TYPE_QCN9160:
4329 		soc->ast_override_support = 1;
4330 		soc->da_war_enabled = false;
4331 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4332 		soc->per_tid_basize_max_tid = 8;
4333 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
4334 		soc->disable_mac1_intr = 1;
4335 		soc->disable_mac2_intr = 1;
4336 		soc->wbm_release_desc_rx_sg_support = 1;
4337 		break;
4338 	case TARGET_TYPE_QCN9224:
4339 		soc->ast_override_support = 1;
4340 		soc->da_war_enabled = false;
4341 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4342 		soc->per_tid_basize_max_tid = 8;
4343 		soc->wbm_release_desc_rx_sg_support = 1;
4344 		soc->rxdma2sw_rings_not_supported = 1;
4345 		soc->wbm_sg_last_msdu_war = 1;
4346 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4347 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4348 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
4349 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4350 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4351 						  CFG_DP_HOST_AST_DB_ENABLE);
4352 		soc->features.wds_ext_ast_override_enable = true;
4353 		break;
4354 	case TARGET_TYPE_QCA5332:
4355 	case TARGET_TYPE_QCN6432:
4356 		soc->ast_override_support = 1;
4357 		soc->da_war_enabled = false;
4358 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
4359 		soc->per_tid_basize_max_tid = 8;
4360 		soc->wbm_release_desc_rx_sg_support = 1;
4361 		soc->rxdma2sw_rings_not_supported = 1;
4362 		soc->wbm_sg_last_msdu_war = 1;
4363 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
4364 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
4365 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
4366 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
4367 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
4368 						  CFG_DP_HOST_AST_DB_ENABLE);
4369 		soc->features.wds_ext_ast_override_enable = true;
4370 		break;
4371 	default:
4372 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4373 		qdf_assert_always(0);
4374 		break;
4375 	}
4376 	dp_soc_cfg_dump(soc, target_type);
4377 }
4378 
4379 /**
4380  * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
4381  * @soc: Opaque DP SOC handle
4382  *
4383  * Return: none
4384  */
4385 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
4386 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
4387 {
4388 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
4389 		soc->mld_mode_ap =
4390 		soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4391 					CDP_CFG_MLD_NETDEV_MODE_AP);
4392 	}
4393 	qdf_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
4394 }
4395 #else
4396 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
4397 {
4398 	(void)soc;
4399 }
4400 #endif
4401 
4402 /**
4403  * dp_soc_init() - Initialize txrx SOC
4404  * @soc: Opaque DP SOC handle
4405  * @htc_handle: Opaque HTC handle
4406  * @hif_handle: Opaque HIF handle
4407  *
4408  * Return: DP SOC handle on success, NULL on failure
4409  */
4410 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
4411 		  struct hif_opaque_softc *hif_handle)
4412 {
4413 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
4414 	bool is_monitor_mode = false;
4415 	uint8_t i;
4416 	int num_dp_msi;
4417 	bool ppeds_attached = false;
4418 
4419 	htt_soc = htt_soc_attach(soc, htc_handle);
4420 	if (!htt_soc)
4421 		goto fail1;
4422 
4423 	soc->htt_handle = htt_soc;
4424 
4425 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
4426 		goto fail2;
4427 
4428 	htt_set_htc_handle(htt_soc, htc_handle);
4429 
4430 	dp_soc_cfg_init(soc);
4431 
4432 	dp_monitor_soc_cfg_init(soc);
4433 	/* Reset/Initialize wbm sg list and flags */
4434 	dp_rx_wbm_sg_list_reset(soc);
4435 
4436 	/* Note: Any SRNG ring initialization should happen only after
4437 	 * Interrupt mode is set and followed by filling up the
4438 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
4439 	 */
4440 	dp_soc_set_interrupt_mode(soc);
4441 	if (soc->cdp_soc.ol_ops->get_con_mode &&
4442 	    soc->cdp_soc.ol_ops->get_con_mode() ==
4443 	    QDF_GLOBAL_MONITOR_MODE) {
4444 		is_monitor_mode = true;
4445 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
4446 	} else {
4447 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
4448 	}
4449 
4450 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
4451 	if (num_dp_msi < 0) {
4452 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
4453 		goto fail3;
4454 	}
4455 
4456 	if (soc->arch_ops.ppeds_handle_attached)
4457 		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
4458 
4459 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
4460 				     soc->intr_mode, is_monitor_mode,
4461 				     ppeds_attached,
4462 				     soc->features.umac_hw_reset_support);
4463 
4464 	/* initialize WBM_IDLE_LINK ring */
4465 	if (dp_hw_link_desc_ring_init(soc)) {
4466 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
4467 		goto fail3;
4468 	}
4469 
4470 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
4471 
4472 	if (dp_soc_srng_init(soc)) {
4473 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
4474 		goto fail4;
4475 	}
4476 
4477 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
4478 			       htt_get_htc_handle(htt_soc),
4479 			       soc->hal_soc, soc->osdev) == NULL)
4480 		goto fail5;
4481 
4482 	/* Initialize descriptors in TCL Rings */
4483 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4484 		hal_tx_init_data_ring(soc->hal_soc,
4485 				      soc->tcl_data_ring[i].hal_srng);
4486 	}
4487 
4488 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
4489 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
4490 		goto fail6;
4491 	}
4492 
4493 	if (soc->arch_ops.txrx_soc_ppeds_start) {
4494 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
4495 			dp_init_err("%pK: ppeds start failed", soc);
4496 			goto fail7;
4497 		}
4498 	}
4499 
4500 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
4501 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
4502 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4503 	wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
4504 			   cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
4505 #endif
4506 	soc->cce_disable = false;
4507 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
4508 
4509 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
4510 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
4511 	qdf_spinlock_create(&soc->vdev_map_lock);
4512 	qdf_atomic_init(&soc->num_tx_outstanding);
4513 	qdf_atomic_init(&soc->num_tx_exception);
4514 	soc->num_tx_allowed =
4515 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
4516 	soc->num_tx_spl_allowed =
4517 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
4518 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
4519 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
4520 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4521 				CDP_CFG_MAX_PEER_ID);
4522 
4523 		if (ret != -EINVAL)
4524 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
4525 
4526 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
4527 				CDP_CFG_CCE_DISABLE);
4528 		if (ret == 1)
4529 			soc->cce_disable = true;
4530 	}
4531 
4532 	/*
4533 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
4534 	 * and IPQ5018 WMAC2 is not there in these platforms.
4535 	 */
4536 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
4537 	    soc->disable_mac2_intr)
4538 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
4539 
4540 	/*
4541 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
4542 	 * WMAC1 is not there in this platform.
4543 	 */
4544 	if (soc->disable_mac1_intr)
4545 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
4546 
4547 	/* setup the global rx defrag waitlist */
4548 	TAILQ_INIT(&soc->rx.defrag.waitlist);
4549 	soc->rx.defrag.timeout_ms =
4550 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
4551 	soc->rx.defrag.next_flush_ms = 0;
4552 	soc->rx.flags.defrag_timeout_check =
4553 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
4554 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
4555 
4556 	dp_monitor_soc_init(soc);
4557 
4558 	qdf_atomic_set(&soc->cmn_init_done, 1);
4559 
4560 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
4561 
4562 	qdf_spinlock_create(&soc->ast_lock);
4563 	dp_peer_mec_spinlock_create(soc);
4564 
4565 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
4566 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
4567 	INIT_RX_HW_STATS_LOCK(soc);
4568 
4569 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
4570 	/* fill the tx/rx cpu ring map*/
4571 	dp_soc_set_txrx_ring_map(soc);
4572 
4573 	TAILQ_INIT(&soc->inactive_peer_list);
4574 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
4575 	TAILQ_INIT(&soc->inactive_vdev_list);
4576 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
4577 	qdf_spinlock_create(&soc->htt_stats.lock);
4578 	/* initialize work queue for stats processing */
4579 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4580 
4581 	dp_reo_desc_deferred_freelist_create(soc);
4582 
4583 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
4584 		qdf_dma_mem_stats_read(),
4585 		qdf_heap_mem_stats_read(),
4586 		qdf_skb_total_mem_stats_read());
4587 
4588 	soc->vdev_stats_id_map = 0;
4589 
4590 	dp_soc_get_ap_mld_mode(soc);
4591 
4592 	return soc;
4593 fail7:
4594 	dp_soc_tx_desc_sw_pools_deinit(soc);
4595 fail6:
4596 	htt_soc_htc_dealloc(soc->htt_handle);
4597 fail5:
4598 	dp_soc_srng_deinit(soc);
4599 fail4:
4600 	dp_hw_link_desc_ring_deinit(soc);
4601 fail3:
4602 	htt_htc_pkt_pool_free(htt_soc);
4603 fail2:
4604 	htt_soc_detach(htt_soc);
4605 fail1:
4606 	return NULL;
4607 }
4608 
4609 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
4610 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
4611 {
4612 	QDF_STATUS status;
4613 
4614 	if (soc->init_tcl_cmd_cred_ring) {
4615 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
4616 				       TCL_CMD_CREDIT, 0, 0);
4617 		if (QDF_IS_STATUS_ERROR(status))
4618 			return status;
4619 
4620 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
4621 				  soc->tcl_cmd_credit_ring.alloc_size,
4622 				  soc->ctrl_psoc,
4623 				  WLAN_MD_DP_SRNG_TCL_CMD,
4624 				  "wbm_desc_rel_ring");
4625 	}
4626 
4627 	return QDF_STATUS_SUCCESS;
4628 }
4629 
4630 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
4631 {
4632 	if (soc->init_tcl_cmd_cred_ring) {
4633 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
4634 				     soc->tcl_cmd_credit_ring.alloc_size,
4635 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
4636 				     "wbm_desc_rel_ring");
4637 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
4638 			       TCL_CMD_CREDIT, 0);
4639 	}
4640 }
4641 
4642 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
4643 {
4644 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4645 	uint32_t entries;
4646 	QDF_STATUS status;
4647 
4648 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
4649 	if (soc->init_tcl_cmd_cred_ring) {
4650 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
4651 				       TCL_CMD_CREDIT, entries, 0);
4652 		if (QDF_IS_STATUS_ERROR(status))
4653 			return status;
4654 	}
4655 
4656 	return QDF_STATUS_SUCCESS;
4657 }
4658 
4659 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
4660 {
4661 	if (soc->init_tcl_cmd_cred_ring)
4662 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
4663 }
4664 
4665 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
4666 {
4667 	if (soc->init_tcl_cmd_cred_ring)
4668 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
4669 					    soc->tcl_cmd_credit_ring.hal_srng);
4670 }
4671 #else
/* TCL command/credit ring is compiled out
 * (WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG): provide no-op stubs so callers do
 * not need conditional compilation.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
4693 #endif
4694 
4695 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
4696 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
4697 {
4698 	QDF_STATUS status;
4699 
4700 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
4701 	if (QDF_IS_STATUS_ERROR(status))
4702 		return status;
4703 
4704 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
4705 			  soc->tcl_status_ring.alloc_size,
4706 			  soc->ctrl_psoc,
4707 			  WLAN_MD_DP_SRNG_TCL_STATUS,
4708 			  "wbm_desc_rel_ring");
4709 
4710 	return QDF_STATUS_SUCCESS;
4711 }
4712 
4713 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
4714 {
4715 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
4716 			     soc->tcl_status_ring.alloc_size,
4717 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
4718 			     "wbm_desc_rel_ring");
4719 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4720 }
4721 
4722 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
4723 {
4724 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4725 	uint32_t entries;
4726 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4727 
4728 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
4729 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
4730 			       TCL_STATUS, entries, 0);
4731 
4732 	return status;
4733 }
4734 
4735 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
4736 {
4737 	dp_srng_free(soc, &soc->tcl_status_ring);
4738 }
4739 #else
/* TCL status ring is compiled out (WLAN_DP_DISABLE_TCL_STATUS_SRNG):
 * provide no-op stubs so callers do not need conditional compilation.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
4757 #endif
4758 
4759 /**
4760  * dp_soc_srng_deinit() - de-initialize soc srng rings
4761  * @soc: Datapath soc handle
4762  *
4763  */
4764 void dp_soc_srng_deinit(struct dp_soc *soc)
4765 {
4766 	uint32_t i;
4767 
4768 	if (soc->arch_ops.txrx_soc_srng_deinit)
4769 		soc->arch_ops.txrx_soc_srng_deinit(soc);
4770 
4771 	/* Free the ring memories */
4772 	/* Common rings */
4773 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4774 			     soc->wbm_desc_rel_ring.alloc_size,
4775 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
4776 			     "wbm_desc_rel_ring");
4777 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4778 
4779 	/* Tx data rings */
4780 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4781 		dp_deinit_tx_pair_by_index(soc, i);
4782 
4783 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4784 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4785 		dp_ipa_deinit_alt_tx_ring(soc);
4786 	}
4787 
4788 	/* TCL command and status rings */
4789 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
4790 	dp_soc_tcl_status_srng_deinit(soc);
4791 
4792 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4793 		/* TODO: Get number of rings and ring sizes
4794 		 * from wlan_cfg
4795 		 */
4796 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
4797 				     soc->reo_dest_ring[i].alloc_size,
4798 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
4799 				     "reo_dest_ring");
4800 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
4801 	}
4802 
4803 	/* REO reinjection ring */
4804 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
4805 			     soc->reo_reinject_ring.alloc_size,
4806 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
4807 			     "reo_reinject_ring");
4808 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4809 
4810 	/* Rx release ring */
4811 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
4812 			     soc->rx_rel_ring.alloc_size,
4813 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
4814 			     "reo_release_ring");
4815 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4816 
4817 	/* Rx exception ring */
4818 	/* TODO: Better to store ring_type and ring_num in
4819 	 * dp_srng during setup
4820 	 */
4821 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
4822 			     soc->reo_exception_ring.alloc_size,
4823 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
4824 			     "reo_exception_ring");
4825 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4826 
4827 	/* REO command and status rings */
4828 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
4829 			     soc->reo_cmd_ring.alloc_size,
4830 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
4831 			     "reo_cmd_ring");
4832 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4833 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
4834 			     soc->reo_status_ring.alloc_size,
4835 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
4836 			     "reo_status_ring");
4837 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4838 }
4839 
4840 /**
4841  * dp_soc_srng_init() - Initialize soc level srng rings
4842  * @soc: Datapath soc handle
4843  *
4844  * Return: QDF_STATUS_SUCCESS on success
4845  *	   QDF_STATUS_E_FAILURE on failure
4846  */
4847 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
4848 {
4849 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4850 	uint8_t i;
4851 	uint8_t wbm2_sw_rx_rel_ring_id;
4852 
4853 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4854 
4855 	dp_enable_verbose_debug(soc);
4856 
4857 	/* WBM descriptor release ring */
4858 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
4859 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
4860 		goto fail1;
4861 	}
4862 
4863 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
4864 			  soc->wbm_desc_rel_ring.alloc_size,
4865 			  soc->ctrl_psoc,
4866 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
4867 			  "wbm_desc_rel_ring");
4868 
4869 	/* TCL command and status rings */
4870 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
4871 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
4872 		goto fail1;
4873 	}
4874 
4875 	if (dp_soc_tcl_status_srng_init(soc)) {
4876 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
4877 		goto fail1;
4878 	}
4879 
4880 	/* REO reinjection ring */
4881 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
4882 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
4883 		goto fail1;
4884 	}
4885 
4886 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
4887 			  soc->reo_reinject_ring.alloc_size,
4888 			  soc->ctrl_psoc,
4889 			  WLAN_MD_DP_SRNG_REO_REINJECT,
4890 			  "reo_reinject_ring");
4891 
4892 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
4893 	/* Rx release ring */
4894 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4895 			 wbm2_sw_rx_rel_ring_id, 0)) {
4896 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
4897 		goto fail1;
4898 	}
4899 
4900 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
4901 			  soc->rx_rel_ring.alloc_size,
4902 			  soc->ctrl_psoc,
4903 			  WLAN_MD_DP_SRNG_RX_REL,
4904 			  "reo_release_ring");
4905 
4906 	/* Rx exception ring */
4907 	if (dp_srng_init(soc, &soc->reo_exception_ring,
4908 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
4909 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
4910 		goto fail1;
4911 	}
4912 
4913 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
4914 			  soc->reo_exception_ring.alloc_size,
4915 			  soc->ctrl_psoc,
4916 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
4917 			  "reo_exception_ring");
4918 
4919 	/* REO command and status rings */
4920 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
4921 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
4922 		goto fail1;
4923 	}
4924 
4925 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
4926 			  soc->reo_cmd_ring.alloc_size,
4927 			  soc->ctrl_psoc,
4928 			  WLAN_MD_DP_SRNG_REO_CMD,
4929 			  "reo_cmd_ring");
4930 
4931 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
4932 	TAILQ_INIT(&soc->rx.reo_cmd_list);
4933 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
4934 
4935 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
4936 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
4937 		goto fail1;
4938 	}
4939 
4940 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
4941 			  soc->reo_status_ring.alloc_size,
4942 			  soc->ctrl_psoc,
4943 			  WLAN_MD_DP_SRNG_REO_STATUS,
4944 			  "reo_status_ring");
4945 
4946 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4947 		if (dp_init_tx_ring_pair_by_index(soc, i))
4948 			goto fail1;
4949 	}
4950 
4951 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4952 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4953 			goto fail1;
4954 
4955 		if (dp_ipa_init_alt_tx_ring(soc))
4956 			goto fail1;
4957 	}
4958 
4959 	dp_create_ext_stats_event(soc);
4960 
4961 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4962 		/* Initialize REO destination ring */
4963 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
4964 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
4965 			goto fail1;
4966 		}
4967 
4968 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
4969 				  soc->reo_dest_ring[i].alloc_size,
4970 				  soc->ctrl_psoc,
4971 				  WLAN_MD_DP_SRNG_REO_DEST,
4972 				  "reo_dest_ring");
4973 	}
4974 
4975 	if (soc->arch_ops.txrx_soc_srng_init) {
4976 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
4977 			dp_init_err("%pK: dp_srng_init failed for arch rings",
4978 				    soc);
4979 			goto fail1;
4980 		}
4981 	}
4982 
4983 	return QDF_STATUS_SUCCESS;
4984 fail1:
4985 	/*
4986 	 * Cleanup will be done as part of soc_detach, which will
4987 	 * be called on pdev attach failure
4988 	 */
4989 	dp_soc_srng_deinit(soc);
4990 	return QDF_STATUS_E_FAILURE;
4991 }
4992 
4993 /**
4994  * dp_soc_srng_free() - free soc level srng rings
4995  * @soc: Datapath soc handle
4996  *
4997  */
4998 void dp_soc_srng_free(struct dp_soc *soc)
4999 {
5000 	uint32_t i;
5001 
5002 	if (soc->arch_ops.txrx_soc_srng_free)
5003 		soc->arch_ops.txrx_soc_srng_free(soc);
5004 
5005 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
5006 
5007 	for (i = 0; i < soc->num_tcl_data_rings; i++)
5008 		dp_free_tx_ring_pair_by_index(soc, i);
5009 
5010 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
5011 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5012 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
5013 		dp_ipa_free_alt_tx_ring(soc);
5014 	}
5015 
5016 	dp_soc_tcl_cmd_cred_srng_free(soc);
5017 	dp_soc_tcl_status_srng_free(soc);
5018 
5019 	for (i = 0; i < soc->num_reo_dest_rings; i++)
5020 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
5021 
5022 	dp_srng_free(soc, &soc->reo_reinject_ring);
5023 	dp_srng_free(soc, &soc->rx_rel_ring);
5024 
5025 	dp_srng_free(soc, &soc->reo_exception_ring);
5026 
5027 	dp_srng_free(soc, &soc->reo_cmd_ring);
5028 	dp_srng_free(soc, &soc->reo_status_ring);
5029 }
5030 
5031 /**
5032  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
5033  * @soc: Datapath soc handle
5034  *
5035  * Return: QDF_STATUS_SUCCESS on success
5036  *	   QDF_STATUS_E_NOMEM on failure
5037  */
5038 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
5039 {
5040 	uint32_t entries;
5041 	uint32_t i;
5042 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5043 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
5044 	uint32_t reo_dst_ring_size;
5045 
5046 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5047 
5048 	/* sw2wbm link descriptor release ring */
5049 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
5050 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
5051 			  entries, 0)) {
5052 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
5053 		goto fail1;
5054 	}
5055 
5056 	/* TCL command and status rings */
5057 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
5058 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
5059 		goto fail1;
5060 	}
5061 
5062 	if (dp_soc_tcl_status_srng_alloc(soc)) {
5063 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
5064 		goto fail1;
5065 	}
5066 
5067 	/* REO reinjection ring */
5068 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
5069 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
5070 			  entries, 0)) {
5071 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
5072 		goto fail1;
5073 	}
5074 
5075 	/* Rx release ring */
5076 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
5077 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
5078 			  entries, 0)) {
5079 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
5080 		goto fail1;
5081 	}
5082 
5083 	/* Rx exception ring */
5084 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
5085 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
5086 			  entries, 0)) {
5087 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
5088 		goto fail1;
5089 	}
5090 
5091 	/* REO command and status rings */
5092 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
5093 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
5094 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
5095 		goto fail1;
5096 	}
5097 
5098 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
5099 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
5100 			  entries, 0)) {
5101 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
5102 		goto fail1;
5103 	}
5104 
5105 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
5106 
5107 	/* Disable cached desc if NSS offload is enabled */
5108 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
5109 		cached = 0;
5110 
5111 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
5112 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
5113 			goto fail1;
5114 	}
5115 
5116 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
5117 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5118 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
5119 			goto fail1;
5120 
5121 		if (dp_ipa_alloc_alt_tx_ring(soc))
5122 			goto fail1;
5123 	}
5124 
5125 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
5126 		/* Setup REO destination ring */
5127 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
5128 				  reo_dst_ring_size, cached)) {
5129 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
5130 			goto fail1;
5131 		}
5132 	}
5133 
5134 	if (soc->arch_ops.txrx_soc_srng_alloc) {
5135 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
5136 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
5137 				    soc);
5138 			goto fail1;
5139 		}
5140 	}
5141 
5142 	return QDF_STATUS_SUCCESS;
5143 
5144 fail1:
5145 	dp_soc_srng_free(soc);
5146 	return QDF_STATUS_E_NOMEM;
5147 }
5148 
5149 /**
5150  * dp_soc_cfg_attach() - set target specific configuration in
5151  *			 dp soc cfg.
5152  * @soc: dp soc handle
5153  */
5154 void dp_soc_cfg_attach(struct dp_soc *soc)
5155 {
5156 	int target_type;
5157 	int nss_cfg = 0;
5158 
5159 	target_type = hal_get_target_type(soc->hal_soc);
5160 	switch (target_type) {
5161 	case TARGET_TYPE_QCA6290:
5162 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5163 					       REO_DST_RING_SIZE_QCA6290);
5164 		break;
5165 	case TARGET_TYPE_QCA6390:
5166 	case TARGET_TYPE_QCA6490:
5167 	case TARGET_TYPE_QCA6750:
5168 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
5169 					       REO_DST_RING_SIZE_QCA6290);
5170 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5171 		break;
5172 	case TARGET_TYPE_KIWI:
5173 	case TARGET_TYPE_MANGO:
5174 	case TARGET_TYPE_PEACH:
5175 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5176 		break;
5177 	case TARGET_TYPE_QCA8074:
5178 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5179 		break;
5180 	case TARGET_TYPE_QCA8074V2:
5181 	case TARGET_TYPE_QCA6018:
5182 	case TARGET_TYPE_QCA9574:
5183 	case TARGET_TYPE_QCN6122:
5184 	case TARGET_TYPE_QCA5018:
5185 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5186 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5187 		break;
5188 	case TARGET_TYPE_QCN9160:
5189 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5190 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
5191 		break;
5192 	case TARGET_TYPE_QCN9000:
5193 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5194 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5195 		break;
5196 	case TARGET_TYPE_QCN9224:
5197 	case TARGET_TYPE_QCA5332:
5198 	case TARGET_TYPE_QCN6432:
5199 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
5200 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
5201 		break;
5202 	default:
5203 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
5204 		qdf_assert_always(0);
5205 		break;
5206 	}
5207 
5208 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
5209 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
5210 
5211 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
5212 
5213 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
5214 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
5215 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
5216 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
5217 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
5218 		soc->init_tcl_cmd_cred_ring = false;
5219 		soc->num_tcl_data_rings =
5220 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
5221 		soc->num_reo_dest_rings =
5222 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
5223 
5224 	} else {
5225 		soc->init_tcl_cmd_cred_ring = true;
5226 		soc->num_tx_comp_rings =
5227 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
5228 		soc->num_tcl_data_rings =
5229 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
5230 		soc->num_reo_dest_rings =
5231 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
5232 	}
5233 
5234 }
5235 
5236 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
5237 {
5238 	struct dp_soc *soc = pdev->soc;
5239 
5240 	switch (pdev->pdev_id) {
5241 	case 0:
5242 		pdev->reo_dest =
5243 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
5244 		break;
5245 
5246 	case 1:
5247 		pdev->reo_dest =
5248 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
5249 		break;
5250 
5251 	case 2:
5252 		pdev->reo_dest =
5253 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
5254 		break;
5255 
5256 	default:
5257 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
5258 			    soc, pdev->pdev_id);
5259 		break;
5260 	}
5261 }
5262 
5263