xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rings_main.c (revision fb436899e24ed79fc745209e906f95145a787017)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 
60 #ifdef WLAN_FEATURE_STATS_EXT
61 #define INIT_RX_HW_STATS_LOCK(_soc) \
62 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
63 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
64 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
65 #else
66 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
67 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
68 #endif
69 
70 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
71 						uint8_t index);
72 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
73 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
74 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
75 						 uint8_t index);
76 
77 /* default_dscp_tid_map - Default DSCP-TID mapping
78  *
79  * DSCP        TID
80  * 000000      0
81  * 001000      1
82  * 010000      2
83  * 011000      3
84  * 100000      4
85  * 101000      5
86  * 110000      6
87  * 111000      7
88  */
/* Table is indexed by the 6-bit DSCP value; each row of 8 entries maps
 * one DSCP class (see the DSCP->TID table in the comment above).
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,	/* DSCP  0..7  -> TID 0 */
	1, 1, 1, 1, 1, 1, 1, 1,	/* DSCP  8..15 -> TID 1 */
	2, 2, 2, 2, 2, 2, 2, 2,	/* DSCP 16..23 -> TID 2 */
	3, 3, 3, 3, 3, 3, 3, 3,	/* DSCP 24..31 -> TID 3 */
	4, 4, 4, 4, 4, 4, 4, 4,	/* DSCP 32..39 -> TID 4 */
	5, 5, 5, 5, 5, 5, 5, 5,	/* DSCP 40..47 -> TID 5 */
	6, 6, 6, 6, 6, 6, 6, 6,	/* DSCP 48..55 -> TID 6 */
	7, 7, 7, 7, 7, 7, 7, 7,	/* DSCP 56..63 -> TID 7 */
};
99 
100 /* default_pcp_tid_map - Default PCP-TID mapping
101  *
102  * PCP     TID
103  * 000      0
104  * 001      1
105  * 010      2
106  * 011      3
107  * 100      4
108  * 101      5
109  * 110      6
110  * 111      7
111  */
/* Identity mapping: the 3-bit 802.1p PCP value is used directly as TID. */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
115 
/* dp_cpu_ring_map - ring assignment per interrupt context for each NSS
 * offload configuration. The first index selects the NSS config variant
 * (DP_NSS_CPU_RING_MAP_MAX rows, last row only when TX pkt capture
 * enhancement is compiled in); the second index is the interrupt
 * context id, and the value is the ring assigned to that context.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
129 
130 /**
131  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
132  * @soc: DP soc handle
133  * @ring_type: ring type
134  * @ring_num: ring_num
135  *
136  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
137  */
138 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
139 					    enum hal_ring_type ring_type,
140 					    int ring_num)
141 {
142 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
143 	uint8_t status = 0;
144 
145 	switch (ring_type) {
146 	case WBM2SW_RELEASE:
147 	case REO_DST:
148 	case RXDMA_BUF:
149 	case REO_EXCEPTION:
150 		status = ((nss_config) & (1 << ring_num));
151 		break;
152 	default:
153 		break;
154 	}
155 
156 	return status;
157 }
158 
159 #if !defined(DP_CON_MON)
160 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
161 {
162 	int i;
163 
164 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
165 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
166 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
167 	}
168 }
169 
170 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
171 
172 void dp_service_lmac_rings(void *arg)
173 {
174 	struct dp_soc *soc = (struct dp_soc *)arg;
175 	int ring = 0, i;
176 	struct dp_pdev *pdev = NULL;
177 	union dp_rx_desc_list_elem_t *desc_list = NULL;
178 	union dp_rx_desc_list_elem_t *tail = NULL;
179 
180 	/* Process LMAC interrupts */
181 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
182 		int mac_for_pdev = ring;
183 		struct dp_srng *rx_refill_buf_ring;
184 
185 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
186 		if (!pdev)
187 			continue;
188 
189 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
190 
191 		dp_monitor_process(soc, NULL, mac_for_pdev,
192 				   QCA_NAPI_BUDGET);
193 
194 		for (i = 0;
195 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
196 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
197 					     mac_for_pdev,
198 					     QCA_NAPI_BUDGET);
199 
200 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
201 						  mac_for_pdev))
202 			dp_rx_buffers_replenish(soc, mac_for_pdev,
203 						rx_refill_buf_ring,
204 						&soc->rx_desc_buf[mac_for_pdev],
205 						0, &desc_list, &tail, false);
206 	}
207 
208 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
209 }
210 
211 #endif
212 
213 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
214 /**
215  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
216  *				rx_near_full_grp1 mask
217  * @soc: Datapath SoC Handle
218  * @ring_num: REO ring number
219  *
220  * Return: 1 if the ring_num belongs to reo_nf_grp1,
221  *	   0, otherwise.
222  */
223 static inline int
224 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
225 {
226 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
227 }
228 
229 /**
230  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
231  *				rx_near_full_grp2 mask
232  * @soc: Datapath SoC Handle
233  * @ring_num: REO ring number
234  *
235  * Return: 1 if the ring_num belongs to reo_nf_grp2,
236  *	   0, otherwise.
237  */
238 static inline int
239 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
240 {
241 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
242 }
243 
244 /**
245  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
246  *				ring type and number
247  * @soc: Datapath SoC handle
248  * @ring_type: SRNG type
249  * @ring_num: ring num
250  *
251  * Return: near-full irq mask pointer
252  */
253 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
254 					enum hal_ring_type ring_type,
255 					int ring_num)
256 {
257 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
258 	uint8_t wbm2_sw_rx_rel_ring_id;
259 	uint8_t *nf_irq_mask = NULL;
260 
261 	switch (ring_type) {
262 	case WBM2SW_RELEASE:
263 		wbm2_sw_rx_rel_ring_id =
264 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
265 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
266 			nf_irq_mask = &soc->wlan_cfg_ctx->
267 					int_tx_ring_near_full_irq_mask[0];
268 		}
269 		break;
270 	case REO_DST:
271 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
272 			nf_irq_mask =
273 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
274 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
275 			nf_irq_mask =
276 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
277 		else
278 			qdf_assert(0);
279 		break;
280 	default:
281 		break;
282 	}
283 
284 	return nf_irq_mask;
285 }
286 
287 /**
288  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
289  * @soc: Datapath SoC handle
290  * @ring_params: srng params handle
291  * @msi2_addr: MSI2 addr to be set for the SRNG
292  * @msi2_data: MSI2 data to be set for the SRNG
293  *
294  * Return: None
295  */
296 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
297 				  struct hal_srng_params *ring_params,
298 				  qdf_dma_addr_t msi2_addr,
299 				  uint32_t msi2_data)
300 {
301 	ring_params->msi2_addr = msi2_addr;
302 	ring_params->msi2_data = msi2_data;
303 }
304 
305 /**
306  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
307  * @soc: Datapath SoC handle
308  * @ring_params: ring_params for SRNG
309  * @ring_type: SENG type
310  * @ring_num: ring number for the SRNG
311  * @nf_msi_grp_num: near full msi group number
312  *
313  * Return: None
314  */
void dp_srng_msi2_setup(struct dp_soc *soc,
			struct hal_srng_params *ring_params,
			int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	/* Without a platform MSI assignment for "DP" there is nothing
	 * to program; leave msi2 params untouched.
	 */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	/* A negative group number means the near-full IRQ of this ring
	 * is not part of any ext interrupt group: clear msi2 so the
	 * near-full interrupt is effectively disabled for the ring.
	 */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Warn (and assert on debug builds) when the group number wraps
	 * past the available MSI vectors, i.e. two groups share one MSI.
	 */
	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* Program the 64-bit MSI address and the per-group data value */
	ring_params->nf_irq_support = 1;
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
352 
353 /* Percentage of ring entries considered as nearly full */
354 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
355 /* Percentage of ring entries considered as critically full */
356 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
357 /* Percentage of ring entries considered as safe threshold */
358 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
359 
360 /**
361  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
362  *			near full irq
363  * @soc: Datapath SoC handle
364  * @ring_params: ring params for SRNG
365  * @ring_type: ring type
366  */
367 void
368 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
369 					  struct hal_srng_params *ring_params,
370 					  int ring_type)
371 {
372 	if (ring_params->nf_irq_support) {
373 		ring_params->high_thresh = (ring_params->num_entries *
374 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
375 		ring_params->crit_thresh = (ring_params->num_entries *
376 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
377 		ring_params->safe_thresh = (ring_params->num_entries *
378 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
379 	}
380 }
381 
382 /**
383  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
384  *			structure from the ring params
385  * @soc: Datapath SoC handle
386  * @srng: SRNG handle
387  * @ring_params: ring params for a SRNG
388  *
389  * Return: None
390  */
391 static inline void
392 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
393 			  struct hal_srng_params *ring_params)
394 {
395 	srng->crit_thresh = ring_params->crit_thresh;
396 	srng->safe_thresh = ring_params->safe_thresh;
397 }
398 
399 #else
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	/* Near-full IRQ feature not compiled in; nothing to record */
}
405 #endif
406 
407 /**
408  * dp_get_num_msi_available()- API to get number of MSIs available
409  * @soc: DP soc Handle
410  * @interrupt_mode: Mode of interrupts
411  *
412  * Return: Number of MSIs available or 0 in case of integrated
413  */
414 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	/* Integrated single-pdev targets have no dedicated DP MSIs */
	return 0;
}
419 #else
420 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
421 {
422 	int msi_data_count;
423 	int msi_data_start;
424 	int msi_irq_start;
425 	int ret;
426 
427 	if (interrupt_mode == DP_INTR_INTEGRATED) {
428 		return 0;
429 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
430 		   DP_INTR_POLL) {
431 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
432 						  &msi_data_count,
433 						  &msi_data_start,
434 						  &msi_irq_start);
435 		if (ret) {
436 			qdf_err("Unable to get DP MSI assignment %d",
437 				interrupt_mode);
438 			return -EINVAL;
439 		}
440 		return msi_data_count;
441 	}
442 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
443 	return -EINVAL;
444 }
445 #endif
446 
447 /**
448  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
449  * update threshold value from wlan_cfg_ctx
450  * @soc: device handle
451  * @ring_params: per ring specific parameters
452  * @ring_type: Ring type
453  * @ring_num: Ring number for a given ring type
454  * @num_entries: number of entries to fill
455  *
456  * Fill the ring params with the pointer update threshold
457  * configuration parameters available in wlan_cfg_ctx
458  *
459  * Return: None
460  */
461 static void
462 dp_srng_configure_pointer_update_thresholds(
463 				struct dp_soc *soc,
464 				struct hal_srng_params *ring_params,
465 				int ring_type, int ring_num,
466 				int num_entries)
467 {
468 	if (ring_type == REO_DST) {
469 		ring_params->pointer_timer_threshold =
470 			wlan_cfg_get_pointer_timer_threshold_rx(
471 						soc->wlan_cfg_ctx);
472 		ring_params->pointer_num_threshold =
473 			wlan_cfg_get_pointer_num_threshold_rx(
474 						soc->wlan_cfg_ctx);
475 	}
476 }
477 
/**
 * dp_srng_init_idx() - Initialize an SRNG (already allocated via
 *			dp_srng_alloc) and register it with HAL at a
 *			given start index
 * @soc: DP soc handle
 * @srng: DP srng to initialize; its base address/size must be populated
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 * @mac_id: mac id for LMAC rings
 * @idx: ring index to start from
 *
 * Idempotent: returns success immediately if the ring was already
 * initialized. On HAL setup failure the srng memory is freed.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* MSI configuration is skipped for rings serviced by polling or
	 * for ring types the target excludes from MSI (dp_skip_msi_cfg).
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
						    ring_type, ring_num,
						    srng->num_entries);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	/* Tell HAL whether a UMAC reset is in flight so it can avoid
	 * touching idle-list state during the setup.
	 */
	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);

	/* HAL setup failed: release the srng memory; caller must not
	 * reuse this srng without re-allocating.
	 */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
545 
546 qdf_export_symbol(dp_srng_init_idx);
547 
548 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
549 /**
550  * dp_service_near_full_srngs() - Bottom half handler to process the near
551  *				full IRQ on a SRNG
552  * @dp_ctx: Datapath SoC handle
553  * @dp_budget: Number of SRNGs which can be processed in a single attempt
554  *		without rescheduling
555  * @cpu: cpu id
556  *
557  * Return: remaining budget/quota for the soc device
558  */
559 static
560 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
561 {
562 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
563 	struct dp_soc *soc = int_ctx->soc;
564 
565 	/*
566 	 * dp_service_near_full_srngs arch ops should be initialized always
567 	 * if the NEAR FULL IRQ feature is enabled.
568 	 */
569 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
570 							dp_budget);
571 }
572 #endif
573 
574 #ifndef QCA_HOST_MODE_WIFI_DISABLED
575 
/**
 * dp_service_srngs() - Top level handler servicing all DP rings mapped
 *			to one interrupt context
 * @dp_ctx: interrupt context (struct dp_intr) for this invocation
 * @dp_budget: total work budget for this invocation
 * @cpu: cpu this handler is running on
 *
 * Service order: TX completions first (to return buffers), then REO
 * exception, RX WBM release, REO destination (RX), REO status and
 * finally LMAC rings. The budget is decremented by the work done at
 * each stage; processing stops as soon as it is exhausted.
 *
 * Return: amount of budget consumed (dp_budget - remaining budget)
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	/* Mark this context as actively servicing rings on this cpu */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts: one REO destination ring per mask bit */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the monitor vdev timer
	 * is not already polling them.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
714 
715 #else /* QCA_HOST_MODE_WIFI_DISABLED */
716 
717 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
718 {
719 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
720 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
721 	struct dp_soc *soc = int_ctx->soc;
722 	uint32_t remaining_quota = dp_budget;
723 	uint32_t work_done  = 0;
724 	int budget = dp_budget;
725 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
726 
727 	if (reo_status_mask) {
728 		if (dp_reo_status_ring_handler(int_ctx, soc))
729 			int_ctx->intr_stats.num_reo_status_ring_masks++;
730 	}
731 
732 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
733 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
734 		if (work_done) {
735 			budget -=  work_done;
736 			if (budget <= 0)
737 				goto budget_done;
738 			remaining_quota = budget;
739 		}
740 	}
741 
742 	qdf_lro_flush(int_ctx->lro_ctx);
743 	intr_stats->num_masks++;
744 
745 budget_done:
746 	return dp_budget - budget;
747 }
748 
749 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
750 
/* Set up the interrupt contexts for polling mode: populate the per
 * context ring masks from config, record monitor-context to lmac
 * mapping and start the poll timer machinery (timer armed elsewhere).
 */
QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;
	int lmac_id = 0;

	/* Invalidate the whole lmac map before assigning entries below */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		/* Contexts carrying a monitor mask get an lmac slot */
		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
793 
794 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
795 /**
796  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
797  * @soc: DP soc handle
798  * @num_irq: IRQ number
799  * @irq_id_map: IRQ map
800  * @intr_id: interrupt context ID
801  *
802  * Return: 0 for success. nonzero for failure.
803  */
804 static inline int
805 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
806 				  int irq_id_map[], int intr_id)
807 {
808 	return hif_register_ext_group(soc->hif_handle,
809 				      num_irq, irq_id_map,
810 				      dp_service_near_full_srngs,
811 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
812 				      HIF_EXEC_NAPI_TYPE,
813 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
814 }
815 #else
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	/* Near-full IRQ feature disabled; report success */
	return 0;
}
822 #endif
823 
824 #ifdef DP_CON_MON_MSI_SKIP_SET
825 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
826 {
827 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
828 		 QDF_GLOBAL_MONITOR_MODE &&
829 		 !wlan_cfg_get_local_pkt_capture(soc->wlan_cfg_ctx));
830 }
831 #else
832 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
833 {
834 	return false;
835 }
836 #endif
837 
/* Tear down DP interrupt handling: free the poll timer or deregister
 * the ext interrupt groups, clear every ring mask in each interrupt
 * context, release per-context LRO state and invalidate the monitor
 * lmac map.
 */
void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_free(&soc->int_timer);
	} else {
		/* Both normal and near-full groups may be registered */
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
		soc->intr_ctx[i].tx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2txmon_ring_mask = 0;
		soc->intr_ctx[i].umac_reset_intr_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}
876 
877 QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
878 {
879 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
880 
881 	int i = 0;
882 	int num_irq = 0;
883 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
884 	int lmac_id = 0;
885 	int napi_scale;
886 
887 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
888 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
889 
890 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
891 		int ret = 0;
892 
893 		/* Map of IRQ ids registered with one interrupt context */
894 		int irq_id_map[HIF_MAX_GRP_IRQ];
895 
896 		int tx_mask =
897 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
898 		int rx_mask =
899 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
900 		int rx_mon_mask =
901 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
902 		int tx_mon_ring_mask =
903 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
904 		int rx_err_ring_mask =
905 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
906 		int rx_wbm_rel_ring_mask =
907 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
908 		int reo_status_ring_mask =
909 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
910 		int rxdma2host_ring_mask =
911 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
912 		int host2rxdma_ring_mask =
913 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
914 		int host2rxdma_mon_ring_mask =
915 			wlan_cfg_get_host2rxdma_mon_ring_mask(
916 				soc->wlan_cfg_ctx, i);
917 		int rx_near_full_grp_1_mask =
918 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
919 							     i);
920 		int rx_near_full_grp_2_mask =
921 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
922 							     i);
923 		int tx_ring_near_full_mask =
924 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
925 							    i);
926 		int host2txmon_ring_mask =
927 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
928 		int umac_reset_intr_mask =
929 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
930 
931 		if (dp_skip_rx_mon_ring_mask_set(soc))
932 			rx_mon_mask = 0;
933 
934 		soc->intr_ctx[i].dp_intr_id = i;
935 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
936 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
937 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
938 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
939 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
940 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
941 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
942 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
943 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
944 			 host2rxdma_mon_ring_mask;
945 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
946 						rx_near_full_grp_1_mask;
947 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
948 						rx_near_full_grp_2_mask;
949 		soc->intr_ctx[i].tx_ring_near_full_mask =
950 						tx_ring_near_full_mask;
951 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
952 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
953 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
954 
955 		soc->intr_ctx[i].soc = soc;
956 
957 		num_irq = 0;
958 
959 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
960 					       &num_irq);
961 
962 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
963 		    tx_ring_near_full_mask) {
964 			dp_soc_near_full_interrupt_attach(soc, num_irq,
965 							  irq_id_map, i);
966 		} else {
967 			napi_scale = wlan_cfg_get_napi_scale_factor(
968 							    soc->wlan_cfg_ctx);
969 			if (!napi_scale)
970 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
971 
972 			ret = hif_register_ext_group(soc->hif_handle,
973 				num_irq, irq_id_map, dp_service_srngs_wrapper,
974 				&soc->intr_ctx[i], "dp_intr",
975 				HIF_EXEC_NAPI_TYPE, napi_scale);
976 		}
977 
978 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
979 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
980 
981 		if (ret) {
982 			dp_init_err("%pK: failed, ret = %d", soc, ret);
983 			dp_soc_interrupt_detach(txrx_soc);
984 			return QDF_STATUS_E_FAILURE;
985 		}
986 
987 		hif_event_history_init(soc->hif_handle, i);
988 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
989 
990 		if (rx_err_ring_mask)
991 			rx_err_ring_intr_ctxt_id = i;
992 
993 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
994 			soc->mon_intr_id_lmac_map[lmac_id] = i;
995 			lmac_id++;
996 		}
997 	}
998 
999 	hif_configure_ext_group_interrupts(soc->hif_handle);
1000 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
1001 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
1002 						  rx_err_ring_intr_ctxt_id, 0);
1003 
1004 	return QDF_STATUS_SUCCESS;
1005 }
1006 
/*
 * Heuristic per-client traffic-shape constants used to size the common
 * HW link descriptor pool in dp_hw_link_desc_pool_banks_alloc().
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
1012 
1013 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
1014 {
1015 	struct qdf_mem_multi_page_t *pages;
1016 
1017 	if (mac_id != WLAN_INVALID_PDEV_ID) {
1018 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1019 	} else {
1020 		pages = &soc->link_desc_pages;
1021 	}
1022 
1023 	if (!pages) {
1024 		dp_err("can not get link desc pages");
1025 		QDF_ASSERT(0);
1026 		return;
1027 	}
1028 
1029 	if (pages->dma_pages) {
1030 		wlan_minidump_remove((void *)
1031 				     pages->dma_pages->page_v_addr_start,
1032 				     pages->num_pages * pages->page_size,
1033 				     soc->ctrl_psoc,
1034 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1035 				     "hw_link_desc_bank");
1036 		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
1037 					     pages, 0, false);
1038 	}
1039 }
1040 
/* Exported so other driver modules can release the link desc banks */
qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
1042 
1043 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
1044 {
1045 	hal_soc_handle_t hal_soc = soc->hal_soc;
1046 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1047 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1048 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1049 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
1050 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
1051 	uint32_t num_mpdu_links_per_queue_desc =
1052 		hal_num_mpdu_links_per_queue_desc(hal_soc);
1053 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1054 	uint32_t *total_link_descs, total_mem_size;
1055 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1056 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1057 	uint32_t num_entries;
1058 	struct qdf_mem_multi_page_t *pages;
1059 	struct dp_srng *dp_srng;
1060 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
1061 
1062 	/* Only Tx queue descriptors are allocated from common link descriptor
1063 	 * pool Rx queue descriptors are not included in this because (REO queue
1064 	 * extension descriptors) they are expected to be allocated contiguously
1065 	 * with REO queue descriptors
1066 	 */
1067 	if (mac_id != WLAN_INVALID_PDEV_ID) {
1068 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1069 		/* dp_monitor_get_link_desc_pages returns NULL only
1070 		 * if monitor SOC is  NULL
1071 		 */
1072 		if (!pages) {
1073 			dp_err("can not get link desc pages");
1074 			QDF_ASSERT(0);
1075 			return QDF_STATUS_E_FAULT;
1076 		}
1077 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
1078 		num_entries = dp_srng->alloc_size /
1079 			hal_srng_get_entrysize(soc->hal_soc,
1080 					       RXDMA_MONITOR_DESC);
1081 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
1082 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
1083 			      MINIDUMP_STR_SIZE);
1084 	} else {
1085 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1086 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1087 
1088 		num_mpdu_queue_descs = num_mpdu_link_descs /
1089 			num_mpdu_links_per_queue_desc;
1090 
1091 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1092 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1093 			num_msdus_per_link_desc;
1094 
1095 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1096 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1097 
1098 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1099 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1100 
1101 		pages = &soc->link_desc_pages;
1102 		total_link_descs = &soc->total_link_descs;
1103 		qdf_str_lcopy(minidump_str, "link_desc_bank",
1104 			      MINIDUMP_STR_SIZE);
1105 	}
1106 
1107 	/* If link descriptor banks are allocated, return from here */
1108 	if (pages->num_pages)
1109 		return QDF_STATUS_SUCCESS;
1110 
1111 	/* Round up to power of 2 */
1112 	*total_link_descs = 1;
1113 	while (*total_link_descs < num_entries)
1114 		*total_link_descs <<= 1;
1115 
1116 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
1117 		     soc, *total_link_descs, link_desc_size);
1118 	total_mem_size =  *total_link_descs * link_desc_size;
1119 	total_mem_size += link_desc_align;
1120 
1121 	dp_init_info("%pK: total_mem_size: %d",
1122 		     soc, total_mem_size);
1123 
1124 	dp_set_max_page_size(pages, max_alloc_size);
1125 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
1126 				      pages,
1127 				      link_desc_size,
1128 				      *total_link_descs,
1129 				      0, false);
1130 	if (!pages->num_pages) {
1131 		dp_err("Multi page alloc fail for hw link desc pool");
1132 		return QDF_STATUS_E_FAULT;
1133 	}
1134 
1135 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
1136 			  pages->num_pages * pages->page_size,
1137 			  soc->ctrl_psoc,
1138 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1139 			  "hw_link_desc_bank");
1140 
1141 	return QDF_STATUS_SUCCESS;
1142 }
1143 
1144 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
1145 {
1146 	uint32_t i;
1147 	uint32_t size = soc->wbm_idle_scatter_buf_size;
1148 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
1149 	qdf_dma_addr_t paddr;
1150 
1151 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
1152 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1153 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
1154 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
1155 			if (vaddr) {
1156 				qdf_mem_free_consistent(soc->osdev,
1157 							soc->osdev->dev,
1158 							size,
1159 							vaddr,
1160 							paddr,
1161 							0);
1162 				vaddr = NULL;
1163 			}
1164 		}
1165 	} else {
1166 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
1167 				     soc->wbm_idle_link_ring.alloc_size,
1168 				     soc->ctrl_psoc,
1169 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1170 				     "wbm_idle_link_ring");
1171 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
1172 	}
1173 }
1174 
/**
 * dp_hw_link_desc_ring_alloc() - Allocate memory to hold the WBM idle
 *				  link descriptor list
 * @soc: DP SOC handle
 *
 * If the whole list fits within the configured max allocation size, a
 * regular WBM_IDLE_LINK SRNG is allocated; otherwise the list is held
 * in up to MAX_IDLE_SCATTER_BUFS DMA-coherent scatter buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	/* total_link_descs must already be set by pool banks alloc */
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;

	if (total_mem_size <= max_alloc_size) {
		/* Small enough for a single contiguous SRNG allocation */
		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
			dp_init_err("%pK: Link desc idle ring setup failed",
				    soc);
			goto fail;
		}

		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				  soc->wbm_idle_link_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				  "wbm_idle_link_ring");
	} else {
		uint32_t num_scatter_bufs;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		/* NOTE(review): return value (entries per scatter buf) is
		 * discarded here - confirm it is not needed at this stage
		 */
		hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
					soc->hal_soc, total_mem_size,
					soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			buf_size = soc->wbm_idle_scatter_buf_size;
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 buf_size,
							 baseaddr);

			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}
		soc->num_scatter_bufs = num_scatter_bufs;
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Release any scatter buffers allocated before the failure */
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

		if (vaddr) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						soc->wbm_idle_scatter_buf_size,
						vaddr,
						paddr, 0);
			vaddr = NULL;
		}
	}
	return QDF_STATUS_E_NOMEM;
}
1258 
/* Exported so other driver modules can allocate the link desc banks */
qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
1260 
1261 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
1262 {
1263 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
1264 
1265 	if (dp_srng->base_vaddr_unaligned) {
1266 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
1267 			return QDF_STATUS_E_FAILURE;
1268 	}
1269 	return QDF_STATUS_SUCCESS;
1270 }
1271 
/**
 * dp_hw_link_desc_ring_deinit() - De-initialize the WBM idle link SRNG
 * @soc: DP SOC handle
 */
void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
1276 
#ifdef IPA_OFFLOAD
/* Number of REO rings reserved for IPA rx offload */
#define USE_1_IPA_RX_REO_RING 1
#define USE_2_IPA_RX_REO_RINGS 2
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation platforms use tiny REO destination rings */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */

#ifdef IPA_WDI3_TX_TWO_PIPES
#ifdef DP_MEMORY_OPT
/* DP_MEMORY_OPT: the IPA alternate tx ring pair is allocated/initialized
 * on demand via the generic tx ring pair helpers.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

#else /* !DP_MEMORY_OPT */
/* Without DP_MEMORY_OPT the alternate tx ring pair is managed with the
 * regular rings, so these hooks are no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
#endif /* DP_MEMORY_OPT */

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}

#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single IPA tx pipe: no alternate ring exists, all hooks are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_WDI3_TX_TWO_PIPES */

#else

#define REO_DST_RING_SIZE_QCA6290 1024

/* IPA offload compiled out: all alternate tx ring hooks are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_OFFLOAD */
1389 
1390 /**
1391  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1392  * @soc: Datapath soc handler
1393  *
1394  * This api resets the default cpu ring map
1395  */
1396 void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1397 {
1398 	uint8_t i;
1399 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1400 
1401 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1402 		switch (nss_config) {
1403 		case dp_nss_cfg_first_radio:
1404 			/*
1405 			 * Setting Tx ring map for one nss offloaded radio
1406 			 */
1407 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1408 			break;
1409 
1410 		case dp_nss_cfg_second_radio:
1411 			/*
1412 			 * Setting Tx ring for two nss offloaded radios
1413 			 */
1414 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1415 			break;
1416 
1417 		case dp_nss_cfg_dbdc:
1418 			/*
1419 			 * Setting Tx ring map for 2 nss offloaded radios
1420 			 */
1421 			soc->tx_ring_map[i] =
1422 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
1423 			break;
1424 
1425 		case dp_nss_cfg_dbtc:
1426 			/*
1427 			 * Setting Tx ring map for 3 nss offloaded radios
1428 			 */
1429 			soc->tx_ring_map[i] =
1430 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
1431 			break;
1432 
1433 		default:
1434 			dp_err("tx_ring_map failed due to invalid nss cfg");
1435 			break;
1436 		}
1437 	}
1438 }
1439 
1440 /**
1441  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
1442  *					  unused WMAC hw rings
1443  * @soc: DP Soc handle
1444  * @mac_num: wmac num
1445  *
1446  * Return: Return void
1447  */
1448 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
1449 						int mac_num)
1450 {
1451 	uint8_t *grp_mask = NULL;
1452 	int group_number;
1453 
1454 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1455 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1456 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1457 					  group_number, 0x0);
1458 
1459 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1460 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1461 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
1462 				      group_number, 0x0);
1463 
1464 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1465 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1466 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
1467 					  group_number, 0x0);
1468 
1469 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1470 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1471 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
1472 					      group_number, 0x0);
1473 }
1474 
1475 #ifdef IPA_OFFLOAD
1476 #ifdef IPA_WDI3_VLAN_SUPPORT
1477 /**
1478  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
1479  *                                     ring for vlan tagged traffic
1480  * @soc: DP Soc handle
1481  *
1482  * Return: Return void
1483  */
1484 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
1485 {
1486 	uint8_t *grp_mask = NULL;
1487 	int group_number, mask;
1488 
1489 	if (!wlan_ipa_is_vlan_enabled())
1490 		return;
1491 
1492 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1493 
1494 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
1495 	if (group_number < 0) {
1496 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1497 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
1498 		return;
1499 	}
1500 
1501 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1502 
1503 	/* reset the interrupt mask for offloaded ring */
1504 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
1505 
1506 	/*
1507 	 * set the interrupt mask to zero for rx offloaded radio.
1508 	 */
1509 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1510 }
#else
/* Stub when IPA_WDI3_VLAN_SUPPORT is not compiled in */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_WDI3_VLAN_SUPPORT */
#else
/* Stub when IPA_OFFLOAD is not compiled in */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_OFFLOAD */
1521 
1522 /**
1523  * dp_soc_reset_intr_mask() - reset interrupt mask
1524  * @soc: DP Soc handle
1525  *
1526  * Return: Return void
1527  */
1528 void dp_soc_reset_intr_mask(struct dp_soc *soc)
1529 {
1530 	uint8_t j;
1531 	uint8_t *grp_mask = NULL;
1532 	int group_number, mask, num_ring;
1533 
1534 	/* number of tx ring */
1535 	num_ring = soc->num_tcl_data_rings;
1536 
1537 	/*
1538 	 * group mask for tx completion  ring.
1539 	 */
1540 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1541 
1542 	/* loop and reset the mask for only offloaded ring */
1543 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
1544 		/*
1545 		 * Group number corresponding to tx offloaded ring.
1546 		 */
1547 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1548 		if (group_number < 0) {
1549 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1550 				      soc, WBM2SW_RELEASE, j);
1551 			continue;
1552 		}
1553 
1554 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1555 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
1556 		    (!mask)) {
1557 			continue;
1558 		}
1559 
1560 		/* reset the tx mask for offloaded ring */
1561 		mask &= (~(1 << j));
1562 
1563 		/*
1564 		 * reset the interrupt mask for offloaded ring.
1565 		 */
1566 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1567 	}
1568 
1569 	/* number of rx rings */
1570 	num_ring = soc->num_reo_dest_rings;
1571 
1572 	/*
1573 	 * group mask for reo destination ring.
1574 	 */
1575 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1576 
1577 	/* loop and reset the mask for only offloaded ring */
1578 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
1579 		/*
1580 		 * Group number corresponding to rx offloaded ring.
1581 		 */
1582 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1583 		if (group_number < 0) {
1584 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1585 				      soc, REO_DST, j);
1586 			continue;
1587 		}
1588 
1589 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1590 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
1591 		    (!mask)) {
1592 			continue;
1593 		}
1594 
1595 		/* reset the interrupt mask for offloaded ring */
1596 		mask &= (~(1 << j));
1597 
1598 		/*
1599 		 * set the interrupt mask to zero for rx offloaded radio.
1600 		 */
1601 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1602 	}
1603 
1604 	/*
1605 	 * group mask for Rx buffer refill ring
1606 	 */
1607 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1608 
1609 	/* loop and reset the mask for only offloaded ring */
1610 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1611 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1612 
1613 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1614 			continue;
1615 		}
1616 
1617 		/*
1618 		 * Group number corresponding to rx offloaded ring.
1619 		 */
1620 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
1621 		if (group_number < 0) {
1622 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1623 				      soc, REO_DST, lmac_id);
1624 			continue;
1625 		}
1626 
1627 		/* set the interrupt mask for offloaded ring */
1628 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1629 							  group_number);
1630 		mask &= (~(1 << lmac_id));
1631 
1632 		/*
1633 		 * set the interrupt mask to zero for rx offloaded radio.
1634 		 */
1635 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1636 						  group_number, mask);
1637 	}
1638 
1639 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1640 
1641 	for (j = 0; j < num_ring; j++) {
1642 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
1643 			continue;
1644 		}
1645 
1646 		/*
1647 		 * Group number corresponding to rx err ring.
1648 		 */
1649 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1650 		if (group_number < 0) {
1651 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1652 				      soc, REO_EXCEPTION, j);
1653 			continue;
1654 		}
1655 
1656 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
1657 					      group_number, 0);
1658 	}
1659 }
1660 
1661 #ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - compute REO remap register values with IPA
 *			   rx offload enabled
 * @soc: DP SOC handle
 * @remap0: output, passed to hal_compute_reo_remap_ix0 (LI targets only)
 * @remap1: output, first value from hal_compute_reo_remap_ix2_ix3
 * @remap2: output, second value from hal_compute_reo_remap_ix2_ix3
 *
 * Excludes the REO dest ring(s) owned by IPA from the rx distribution:
 * two rings on BE targets and on LI targets with IPA vlan enabled,
 * otherwise one ring.
 *
 * Return: true (remap values computed)
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2)
{
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		/* BE reserves two REO rings for IPA */
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_2_IPA_RX_REO_RINGS, remap1,
					      remap2);
		break;

	case CDP_ARCH_TYPE_LI:
		if (wlan_ipa_is_vlan_enabled()) {
			/* vlan feature claims a second IPA REO ring */
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_2_IPA_RX_REO_RINGS, remap1,
					remap2);

		} else {
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_1_IPA_RX_REO_RING, remap1,
					remap2);
		}

		/* NOTE(review): IX0 is only recomputed for LI here; confirm
		 * BE intentionally leaves *remap0 untouched
		 */
		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
		break;
	default:
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);
	}

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
1704 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* True when @index is the TCL ring reserved for the IPA alternate pipe */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True when @index is the completion ring of the IPA alternate pipe */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single-pipe IPA: no ring qualifies as the alternate tx ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
1726 
1727 /**
1728  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
1729  *
1730  * @tx_ring_num: Tx ring number
1731  * @tx_ipa_ring_sz: Return param only updated for IPA.
1732  * @soc_cfg_ctx: dp soc cfg context
1733  *
1734  * Return: None
1735  */
1736 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
1737 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1738 {
1739 	if (!soc_cfg_ctx->ipa_enabled)
1740 		return;
1741 
1742 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
1743 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
1744 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
1745 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
1746 }
1747 
1748 /**
1749  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
1750  *
1751  * @tx_comp_ring_num: Tx comp ring number
1752  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
1753  * @soc_cfg_ctx: dp soc cfg context
1754  *
1755  * Return: None
1756  */
1757 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
1758 					 int *tx_comp_ipa_ring_sz,
1759 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1760 {
1761 	if (!soc_cfg_ctx->ipa_enabled)
1762 		return;
1763 
1764 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
1765 		*tx_comp_ipa_ring_sz =
1766 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
1767 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
1768 		*tx_comp_ipa_ring_sz =
1769 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
1770 }
1771 #else
1772 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
1773 {
1774 	uint8_t num = 0;
1775 
1776 	switch (value) {
1777 	/* should we have all the different possible ring configs */
1778 	case 0xFF:
1779 		num = 8;
1780 		ring[0] = REO_REMAP_SW1;
1781 		ring[1] = REO_REMAP_SW2;
1782 		ring[2] = REO_REMAP_SW3;
1783 		ring[3] = REO_REMAP_SW4;
1784 		ring[4] = REO_REMAP_SW5;
1785 		ring[5] = REO_REMAP_SW6;
1786 		ring[6] = REO_REMAP_SW7;
1787 		ring[7] = REO_REMAP_SW8;
1788 		break;
1789 
1790 	case 0x3F:
1791 		num = 6;
1792 		ring[0] = REO_REMAP_SW1;
1793 		ring[1] = REO_REMAP_SW2;
1794 		ring[2] = REO_REMAP_SW3;
1795 		ring[3] = REO_REMAP_SW4;
1796 		ring[4] = REO_REMAP_SW5;
1797 		ring[5] = REO_REMAP_SW6;
1798 		break;
1799 
1800 	case 0xF:
1801 		num = 4;
1802 		ring[0] = REO_REMAP_SW1;
1803 		ring[1] = REO_REMAP_SW2;
1804 		ring[2] = REO_REMAP_SW3;
1805 		ring[3] = REO_REMAP_SW4;
1806 		break;
1807 	case 0xE:
1808 		num = 3;
1809 		ring[0] = REO_REMAP_SW2;
1810 		ring[1] = REO_REMAP_SW3;
1811 		ring[2] = REO_REMAP_SW4;
1812 		break;
1813 	case 0xD:
1814 		num = 3;
1815 		ring[0] = REO_REMAP_SW1;
1816 		ring[1] = REO_REMAP_SW3;
1817 		ring[2] = REO_REMAP_SW4;
1818 		break;
1819 	case 0xC:
1820 		num = 2;
1821 		ring[0] = REO_REMAP_SW3;
1822 		ring[1] = REO_REMAP_SW4;
1823 		break;
1824 	case 0xB:
1825 		num = 3;
1826 		ring[0] = REO_REMAP_SW1;
1827 		ring[1] = REO_REMAP_SW2;
1828 		ring[2] = REO_REMAP_SW4;
1829 		break;
1830 	case 0xA:
1831 		num = 2;
1832 		ring[0] = REO_REMAP_SW2;
1833 		ring[1] = REO_REMAP_SW4;
1834 		break;
1835 	case 0x9:
1836 		num = 2;
1837 		ring[0] = REO_REMAP_SW1;
1838 		ring[1] = REO_REMAP_SW4;
1839 		break;
1840 	case 0x8:
1841 		num = 1;
1842 		ring[0] = REO_REMAP_SW4;
1843 		break;
1844 	case 0x7:
1845 		num = 3;
1846 		ring[0] = REO_REMAP_SW1;
1847 		ring[1] = REO_REMAP_SW2;
1848 		ring[2] = REO_REMAP_SW3;
1849 		break;
1850 	case 0x6:
1851 		num = 2;
1852 		ring[0] = REO_REMAP_SW2;
1853 		ring[1] = REO_REMAP_SW3;
1854 		break;
1855 	case 0x5:
1856 		num = 2;
1857 		ring[0] = REO_REMAP_SW1;
1858 		ring[1] = REO_REMAP_SW3;
1859 		break;
1860 	case 0x4:
1861 		num = 1;
1862 		ring[0] = REO_REMAP_SW3;
1863 		break;
1864 	case 0x3:
1865 		num = 2;
1866 		ring[0] = REO_REMAP_SW1;
1867 		ring[1] = REO_REMAP_SW2;
1868 		break;
1869 	case 0x2:
1870 		num = 1;
1871 		ring[0] = REO_REMAP_SW2;
1872 		break;
1873 	case 0x1:
1874 		num = 1;
1875 		ring[0] = REO_REMAP_SW1;
1876 		break;
1877 	default:
1878 		dp_err("unknown reo ring map 0x%x", value);
1879 		QDF_BUG(0);
1880 	}
1881 	return num;
1882 }
1883 
/**
 * dp_reo_remap_config() - compute REO remap register values (no IPA)
 * @soc: DP SOC handle
 * @remap0: output, passed to hal_compute_reo_remap_ix0 (default cfg only)
 * @remap1: output, first value from hal_compute_reo_remap_ix2_ix3
 * @remap2: output, second value from hal_compute_reo_remap_ix2_ix3
 *
 * Removes NSS-offloaded radios' rings from the rx hash distribution by
 * masking the configured ring bitmap per offload case.
 *
 * Return: true if remap values were computed, false when both/all
 *	   radios are NSS offloaded (host keeps no REO rings)
 */
bool dp_reo_remap_config(struct dp_soc *soc,
			 uint32_t *remap0,
			 uint32_t *remap1,
			 uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
	uint8_t num;
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
	uint32_t value;

	switch (offload_radio) {
	case dp_nss_cfg_default:
		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);
		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);

		break;
	case dp_nss_cfg_first_radio:
		/* drop SW1 (first radio's ring) from the map */
		value = reo_config & 0xE;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_second_radio:
		/* drop SW2 (second radio's ring) from the map */
		value = reo_config & 0xD;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;
	}

	/* NOTE(review): an out-of-range offload_radio falls through here
	 * with remap1/remap2 unset - confirm callers guarantee the enum
	 */
	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
1928 
/* IPA offload compiled out: ring sizes are never overridden */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
1939 #endif /* IPA_OFFLOAD */
1940 
1941 /**
1942  * dp_reo_frag_dst_set() - configure reo register to set the
1943  *                        fragment destination ring
1944  * @soc: Datapath soc
1945  * @frag_dst_ring: output parameter to set fragment destination ring
1946  *
1947  * Based on offload_radio below fragment destination rings is selected
1948  * 0 - TCL
1949  * 1 - SW1
1950  * 2 - SW2
1951  * 3 - SW3
1952  * 4 - SW4
1953  * 5 - Release
1954  * 6 - FW
1955  * 7 - alternate select
1956  *
1957  * Return: void
1958  */
1959 void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
1960 {
1961 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1962 
1963 	switch (offload_radio) {
1964 	case dp_nss_cfg_default:
1965 		*frag_dst_ring = REO_REMAP_TCL;
1966 		break;
1967 	case dp_nss_cfg_first_radio:
1968 		/*
1969 		 * This configuration is valid for single band radio which
1970 		 * is also NSS offload.
1971 		 */
1972 	case dp_nss_cfg_dbdc:
1973 	case dp_nss_cfg_dbtc:
1974 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
1975 		break;
1976 	default:
1977 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
1978 		break;
1979 	}
1980 }
1981 
#ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to wait for rx HW stats completion */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
/* Stub when WLAN_FEATURE_STATS_EXT is not compiled in */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
1992 
1993 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
1994 {
1995 	int tcl_ring_num, wbm_ring_num;
1996 
1997 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
1998 						index,
1999 						&tcl_ring_num,
2000 						&wbm_ring_num);
2001 
2002 	if (tcl_ring_num == -1) {
2003 		dp_err("incorrect tcl ring num for index %u", index);
2004 		return;
2005 	}
2006 
2007 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
2008 			     soc->tcl_data_ring[index].alloc_size,
2009 			     soc->ctrl_psoc,
2010 			     WLAN_MD_DP_SRNG_TCL_DATA,
2011 			     "tcl_data_ring");
2012 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
2013 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
2014 		       tcl_ring_num);
2015 
2016 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
2017 		return;
2018 
2019 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
2020 			     soc->tx_comp_ring[index].alloc_size,
2021 			     soc->ctrl_psoc,
2022 			     WLAN_MD_DP_SRNG_TX_COMP,
2023 			     "tcl_comp_ring");
2024 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2025 		       wbm_ring_num);
2026 }
2027 
2028 /**
2029  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
2030  * ring pair
2031  * @soc: DP soc pointer
2032  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2033  *
2034  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2035  */
2036 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
2037 						uint8_t index)
2038 {
2039 	int tcl_ring_num, wbm_ring_num;
2040 	uint8_t bm_id;
2041 
2042 	if (index >= MAX_TCL_DATA_RINGS) {
2043 		dp_err("unexpected index!");
2044 		QDF_BUG(0);
2045 		goto fail1;
2046 	}
2047 
2048 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
2049 						index,
2050 						&tcl_ring_num,
2051 						&wbm_ring_num);
2052 
2053 	if (tcl_ring_num == -1) {
2054 		dp_err("incorrect tcl ring num for index %u", index);
2055 		goto fail1;
2056 	}
2057 
2058 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
2059 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
2060 			 tcl_ring_num, 0)) {
2061 		dp_err("dp_srng_init failed for tcl_data_ring");
2062 		goto fail1;
2063 	}
2064 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
2065 			  soc->tcl_data_ring[index].alloc_size,
2066 			  soc->ctrl_psoc,
2067 			  WLAN_MD_DP_SRNG_TCL_DATA,
2068 			  "tcl_data_ring");
2069 
2070 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
2071 		goto set_rbm;
2072 
2073 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2074 			 wbm_ring_num, 0)) {
2075 		dp_err("dp_srng_init failed for tx_comp_ring");
2076 		goto fail1;
2077 	}
2078 
2079 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
2080 			  soc->tx_comp_ring[index].alloc_size,
2081 			  soc->ctrl_psoc,
2082 			  WLAN_MD_DP_SRNG_TX_COMP,
2083 			  "tcl_comp_ring");
2084 set_rbm:
2085 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
2086 
2087 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
2088 
2089 	return QDF_STATUS_SUCCESS;
2090 
2091 fail1:
2092 	return QDF_STATUS_E_FAILURE;
2093 }
2094 
2095 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
2096 {
2097 	dp_debug("index %u", index);
2098 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
2099 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
2100 }
2101 
2102 /**
2103  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
2104  * ring pair for the given "index"
2105  * @soc: DP soc pointer
2106  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2107  *
2108  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2109  */
2110 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
2111 						 uint8_t index)
2112 {
2113 	int tx_ring_size;
2114 	int tx_comp_ring_size;
2115 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
2116 	int cached = 0;
2117 
2118 	if (index >= MAX_TCL_DATA_RINGS) {
2119 		dp_err("unexpected index!");
2120 		QDF_BUG(0);
2121 		goto fail1;
2122 	}
2123 
2124 	dp_debug("index %u", index);
2125 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
2126 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
2127 
2128 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
2129 			  tx_ring_size, cached)) {
2130 		dp_err("dp_srng_alloc failed for tcl_data_ring");
2131 		goto fail1;
2132 	}
2133 
2134 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2135 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
2136 	/* Enable cached TCL desc if NSS offload is disabled */
2137 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2138 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
2139 
2140 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
2141 	    INVALID_WBM_RING_NUM)
2142 		return QDF_STATUS_SUCCESS;
2143 
2144 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
2145 			  tx_comp_ring_size, cached)) {
2146 		dp_err("dp_srng_alloc failed for tx_comp_ring");
2147 		goto fail1;
2148 	}
2149 
2150 	return QDF_STATUS_SUCCESS;
2151 
2152 fail1:
2153 	return QDF_STATUS_E_FAILURE;
2154 }
2155 
2156 /**
2157  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
2158  * @pdev: DP_PDEV handle
2159  *
2160  * Return: void
2161  */
2162 void
2163 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2164 {
2165 	uint8_t map_id;
2166 	struct dp_soc *soc = pdev->soc;
2167 
2168 	if (!soc)
2169 		return;
2170 
2171 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2172 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2173 			     default_dscp_tid_map,
2174 			     sizeof(default_dscp_tid_map));
2175 	}
2176 
2177 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2178 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2179 					default_dscp_tid_map,
2180 					map_id);
2181 	}
2182 }
2183 
2184 /**
2185  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
2186  * @pdev: DP_PDEV handle
2187  *
2188  * Return: void
2189  */
2190 void
2191 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
2192 {
2193 	struct dp_soc *soc = pdev->soc;
2194 
2195 	if (!soc)
2196 		return;
2197 
2198 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
2199 		     sizeof(default_pcp_tid_map));
2200 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
2201 }
2202 
/**
 * dp_reo_desc_freelist_destroy() - drain and tear down the REO desc freelist
 * @soc: DP SOC handle
 *
 * Unmaps and frees every queued REO queue descriptor, then destroys
 * the list and its lock. Static inline unless DP_UMAC_HW_RESET_SUPPORT
 * is defined (presumably referenced by the UMAC HW reset path — confirm).
 *
 * Return: none
 */
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	/* Drain under the lock: unmap the HW qdesc, free its memory and
	 * the list node for every remaining entry.
	 */
	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	/* List and lock are destroyed only after the drain completes */
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
2226 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Creates the lock and the bounded list, then marks the list usable.
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	soc->reo_desc_deferred_freelist_init = true;
}

/**
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* Cleared under the lock — presumably stops further enqueues;
	 * confirm against the enqueue path.
	 */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		/* Unmap the HW queue descriptor before freeing its memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Stubs when deferred REO qdesc destroy is not compiled in */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
2279 
2280 /**
2281  * dp_soc_reset_txrx_ring_map() - reset tx ring map
2282  * @soc: DP SOC handle
2283  *
2284  */
2285 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
2286 {
2287 	uint32_t i;
2288 
2289 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
2290 		soc->tx_ring_map[i] = 0;
2291 }
2292 
2293 /**
2294  * dp_soc_deinit() - Deinitialize txrx SOC
2295  * @txrx_soc: Opaque DP SOC handle
2296  *
2297  * Return: None
2298  */
2299 void dp_soc_deinit(void *txrx_soc)
2300 {
2301 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2302 	struct htt_soc *htt_soc = soc->htt_handle;
2303 
2304 	dp_monitor_soc_deinit(soc);
2305 
2306 	/* free peer tables & AST tables allocated during peer_map_attach */
2307 	if (soc->peer_map_attach_success) {
2308 		dp_peer_find_detach(soc);
2309 		soc->arch_ops.txrx_peer_map_detach(soc);
2310 		soc->peer_map_attach_success = FALSE;
2311 	}
2312 
2313 	qdf_flush_work(&soc->htt_stats.work);
2314 	qdf_disable_work(&soc->htt_stats.work);
2315 
2316 	qdf_spinlock_destroy(&soc->htt_stats.lock);
2317 
2318 	dp_soc_reset_txrx_ring_map(soc);
2319 
2320 	dp_reo_desc_freelist_destroy(soc);
2321 	dp_reo_desc_deferred_freelist_destroy(soc);
2322 
2323 	DEINIT_RX_HW_STATS_LOCK(soc);
2324 
2325 	qdf_spinlock_destroy(&soc->ast_lock);
2326 
2327 	dp_peer_mec_spinlock_destroy(soc);
2328 
2329 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2330 
2331 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
2332 
2333 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
2334 
2335 	qdf_spinlock_destroy(&soc->vdev_map_lock);
2336 
2337 	dp_reo_cmdlist_destroy(soc);
2338 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
2339 
2340 	dp_soc_tx_desc_sw_pools_deinit(soc);
2341 
2342 	dp_soc_srng_deinit(soc);
2343 
2344 	dp_hw_link_desc_ring_deinit(soc);
2345 
2346 	dp_soc_print_inactive_objects(soc);
2347 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
2348 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
2349 
2350 	htt_soc_htc_dealloc(soc->htt_handle);
2351 
2352 	htt_soc_detach(htt_soc);
2353 
2354 	/* Free wbm sg list and reset flags in down path */
2355 	dp_rx_wbm_sg_list_deinit(soc);
2356 
2357 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
2358 			     WLAN_MD_DP_SOC, "dp_soc");
2359 }
2360 
#ifdef QCA_HOST2FW_RXBUF_RING
/**
 * dp_htt_setup_rxdma_err_dst_ring() - register the RXDMA error dest ring
 *				       with HTT for the given mac/lmac
 * @soc: DP SOC handle
 * @mac_id: mac id to register the ring for
 * @lmac_id: lmac id indexing soc->rxdma_err_dst_ring
 */
void
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
				int lmac_id)
{
	struct dp_srng *err_dst_ring = &soc->rxdma_err_dst_ring[lmac_id];

	/* Only rings that were actually initialized are registered */
	if (!err_dst_ring->hal_srng)
		return;

	htt_srng_setup(soc->htt_handle, mac_id, err_dst_ring->hal_srng,
		       RXDMA_DST);
}
#endif
2372 
2373 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
2374 				  enum cdp_host_reo_dest_ring *reo_dest,
2375 				  bool *hash_based)
2376 {
2377 	struct dp_soc *soc;
2378 	struct dp_pdev *pdev;
2379 
2380 	pdev = vdev->pdev;
2381 	soc = pdev->soc;
2382 	/*
2383 	 * hash based steering is disabled for Radios which are offloaded
2384 	 * to NSS
2385 	 */
2386 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
2387 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
2388 
2389 	/*
2390 	 * Below line of code will ensure the proper reo_dest ring is chosen
2391 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
2392 	 */
2393 	*reo_dest = pdev->reo_dest;
2394 }
2395 
2396 #ifdef IPA_OFFLOAD
2397 /**
2398  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
2399  * @vdev: Virtual device
2400  *
2401  * Return: true if the vdev is of subtype P2P
2402  *	   false if the vdev is of any other subtype
2403  */
2404 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
2405 {
2406 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
2407 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
2408 	    vdev->subtype == wlan_op_subtype_p2p_go)
2409 		return true;
2410 
2411 	return false;
2412 }
2413 
2414 /**
2415  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2416  * @vdev: Datapath VDEV handle
2417  * @setup_info:
2418  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2419  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2420  * @lmac_peer_id_msb:
2421  *
2422  * If IPA is enabled in ini, for SAP mode, disable hash based
2423  * steering, use default reo_dst ring for RX. Use config values for other modes.
2424  *
2425  * Return: None
2426  */
2427 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
2428 				       struct cdp_peer_setup_info *setup_info,
2429 				       enum cdp_host_reo_dest_ring *reo_dest,
2430 				       bool *hash_based,
2431 				       uint8_t *lmac_peer_id_msb)
2432 {
2433 	struct dp_soc *soc;
2434 	struct dp_pdev *pdev;
2435 
2436 	pdev = vdev->pdev;
2437 	soc = pdev->soc;
2438 
2439 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
2440 
2441 	/* For P2P-GO interfaces we do not need to change the REO
2442 	 * configuration even if IPA config is enabled
2443 	 */
2444 	if (dp_is_vdev_subtype_p2p(vdev))
2445 		return;
2446 
2447 	/*
2448 	 * If IPA is enabled, disable hash-based flow steering and set
2449 	 * reo_dest_ring_4 as the REO ring to receive packets on.
2450 	 * IPA is configured to reap reo_dest_ring_4.
2451 	 *
2452 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
2453 	 * value enum value is from 1 - 4.
2454 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
2455 	 */
2456 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
2457 		if (dp_ipa_is_mdm_platform()) {
2458 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
2459 			if (vdev->opmode == wlan_op_mode_ap)
2460 				*hash_based = 0;
2461 		} else {
2462 			dp_debug("opt_dp: default HOST reo ring is set");
2463 		}
2464 	}
2465 }
2466 
2467 #else
2468 
2469 /**
2470  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2471  * @vdev: Datapath VDEV handle
2472  * @setup_info:
2473  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2474  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2475  * @lmac_peer_id_msb:
2476  *
2477  * Use system config values for hash based steering.
2478  * Return: None
2479  */
2480 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
2481 				       struct cdp_peer_setup_info *setup_info,
2482 				       enum cdp_host_reo_dest_ring *reo_dest,
2483 				       bool *hash_based,
2484 				       uint8_t *lmac_peer_id_msb)
2485 {
2486 	struct dp_soc *soc = vdev->pdev->soc;
2487 
2488 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
2489 					lmac_peer_id_msb);
2490 }
2491 #endif /* IPA_OFFLOAD */
2492 
2493 /**
2494  * dp_peer_setup_wifi3() - initialize the peer
2495  * @soc_hdl: soc handle object
2496  * @vdev_id: vdev_id of vdev object
2497  * @peer_mac: Peer's mac address
2498  * @setup_info: peer setup info for MLO
2499  *
2500  * Return: QDF_STATUS
2501  */
2502 QDF_STATUS
2503 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2504 		    uint8_t *peer_mac,
2505 		    struct cdp_peer_setup_info *setup_info)
2506 {
2507 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2508 	struct dp_pdev *pdev;
2509 	bool hash_based = 0;
2510 	enum cdp_host_reo_dest_ring reo_dest;
2511 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2512 	struct dp_vdev *vdev = NULL;
2513 	struct dp_peer *peer =
2514 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
2515 					       DP_MOD_ID_CDP);
2516 	struct dp_peer *mld_peer = NULL;
2517 	enum wlan_op_mode vdev_opmode;
2518 	uint8_t lmac_peer_id_msb = 0;
2519 
2520 	if (!peer)
2521 		return QDF_STATUS_E_FAILURE;
2522 
2523 	vdev = peer->vdev;
2524 	if (!vdev) {
2525 		status = QDF_STATUS_E_FAILURE;
2526 		goto fail;
2527 	}
2528 
2529 	/* save vdev related member in case vdev freed */
2530 	vdev_opmode = vdev->opmode;
2531 	pdev = vdev->pdev;
2532 	dp_peer_setup_get_reo_hash(vdev, setup_info,
2533 				   &reo_dest, &hash_based,
2534 				   &lmac_peer_id_msb);
2535 
2536 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
2537 					   peer, vdev, vdev->vdev_id,
2538 					   setup_info);
2539 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
2540 		"hash-based-steering:%d default-reo_dest:%u",
2541 		pdev->pdev_id, vdev->vdev_id,
2542 		vdev->opmode, peer,
2543 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
2544 
2545 	/*
2546 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
2547 	 * i.e both the devices have same MAC address. In these
2548 	 * cases we want such pkts to be processed in NULL Q handler
2549 	 * which is REO2TCL ring. for this reason we should
2550 	 * not setup reo_queues and default route for bss_peer.
2551 	 */
2552 	if (!IS_MLO_DP_MLD_PEER(peer))
2553 		dp_monitor_peer_tx_init(pdev, peer);
2554 
2555 	if (!setup_info)
2556 		if (dp_peer_legacy_setup(soc, peer) !=
2557 				QDF_STATUS_SUCCESS) {
2558 			status = QDF_STATUS_E_RESOURCES;
2559 			goto fail;
2560 		}
2561 
2562 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
2563 		status = QDF_STATUS_E_FAILURE;
2564 		goto fail;
2565 	}
2566 
2567 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
2568 		/* TODO: Check the destination ring number to be passed to FW */
2569 		soc->cdp_soc.ol_ops->peer_set_default_routing(
2570 				soc->ctrl_psoc,
2571 				peer->vdev->pdev->pdev_id,
2572 				peer->mac_addr.raw,
2573 				peer->vdev->vdev_id, hash_based, reo_dest,
2574 				lmac_peer_id_msb);
2575 	}
2576 
2577 	qdf_atomic_set(&peer->is_default_route_set, 1);
2578 
2579 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
2580 	if (QDF_IS_STATUS_ERROR(status)) {
2581 		dp_peer_err("peer mlo setup failed");
2582 		qdf_assert_always(0);
2583 	}
2584 
2585 	if (vdev_opmode != wlan_op_mode_monitor) {
2586 		/* In case of MLD peer, switch peer to mld peer and
2587 		 * do peer_rx_init.
2588 		 */
2589 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2590 		    IS_MLO_DP_LINK_PEER(peer)) {
2591 			if (setup_info && setup_info->is_first_link) {
2592 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
2593 				if (mld_peer)
2594 					dp_peer_rx_init(pdev, mld_peer);
2595 				else
2596 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
2597 			}
2598 		} else {
2599 			dp_peer_rx_init(pdev, peer);
2600 		}
2601 	}
2602 
2603 	if (!IS_MLO_DP_MLD_PEER(peer))
2604 		dp_peer_ppdu_delayed_ba_init(peer);
2605 
2606 fail:
2607 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2608 	return status;
2609 }
2610 
2611 /**
2612  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
2613  * @txrx_soc: cdp soc handle
2614  * @ac: Access category
2615  * @value: timeout value in millisec
2616  *
2617  * Return: void
2618  */
2619 void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
2620 			     uint8_t ac, uint32_t value)
2621 {
2622 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2623 
2624 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
2625 }
2626 
2627 /**
2628  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
2629  * @txrx_soc: cdp soc handle
2630  * @ac: access category
2631  * @value: timeout value in millisec
2632  *
2633  * Return: void
2634  */
2635 void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
2636 			     uint8_t ac, uint32_t *value)
2637 {
2638 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2639 
2640 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
2641 }
2642 
2643 /**
2644  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
2645  * @txrx_soc: cdp soc handle
2646  * @pdev_id: id of physical device object
2647  * @val: reo destination ring index (1 - 4)
2648  *
2649  * Return: QDF_STATUS
2650  */
2651 QDF_STATUS
2652 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
2653 		     enum cdp_host_reo_dest_ring val)
2654 {
2655 	struct dp_pdev *pdev =
2656 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2657 						   pdev_id);
2658 
2659 	if (pdev) {
2660 		pdev->reo_dest = val;
2661 		return QDF_STATUS_SUCCESS;
2662 	}
2663 
2664 	return QDF_STATUS_E_FAILURE;
2665 }
2666 
2667 /**
2668  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
2669  * @txrx_soc: cdp soc handle
2670  * @pdev_id: id of physical device object
2671  *
2672  * Return: reo destination ring index
2673  */
2674 enum cdp_host_reo_dest_ring
2675 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
2676 {
2677 	struct dp_pdev *pdev =
2678 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2679 						   pdev_id);
2680 
2681 	if (pdev)
2682 		return pdev->reo_dest;
2683 	else
2684 		return cdp_host_reo_dest_ring_unknown;
2685 }
2686 
/**
 * dp_rx_bar_stats_cb() - REO queue stats callback accumulating BAR rx count
 * @soc: DP SOC handle
 * @cb_ctxt: callback context (struct dp_pdev *)
 * @reo_status: REO command completion status
 *
 * Return: none
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	/* The pdev passed as cb_ctxt may no longer exist when the REO
	 * command completes; validate before touching it.
	 */
	if (!dp_check_pdev_exists(soc, pdev)) {
		dp_err_rl("pdev doesn't exist");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d",
			       queue_status->header.status);
		/* Completion is signalled even on failure */
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}
2711 
2712 /**
2713  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
2714  * @soc: dp soc.
2715  * @pdev: dp pdev.
2716  *
2717  * Return: None.
2718  */
2719 void
2720 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
2721 {
2722 	uint32_t hw_head;
2723 	uint32_t hw_tail;
2724 	struct dp_srng *srng;
2725 
2726 	if (!soc) {
2727 		dp_err("soc is NULL");
2728 		return;
2729 	}
2730 
2731 	if (!pdev) {
2732 		dp_err("pdev is NULL");
2733 		return;
2734 	}
2735 
2736 	srng = &pdev->soc->wbm_idle_link_ring;
2737 	if (!srng) {
2738 		dp_err("wbm_idle_link_ring srng is NULL");
2739 		return;
2740 	}
2741 
2742 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
2743 			&hw_tail, WBM_IDLE_LINK);
2744 
2745 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
2746 		 hw_head, hw_tail);
2747 }
2748 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * dp_update_soft_irq_limits() - override the per-poll reap packet limits
 * @soc: DP SOC handle
 * @tx_limit: tx completion reap loop packet limit
 * @rx_limit: rx reap loop packet limit
 *
 * Return: none
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}

#else

/* Stub when softirq time limits are not compiled in */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
2765 
2766 /**
2767  * dp_display_srng_info() - Dump the srng HP TP info
2768  * @soc_hdl: CDP Soc handle
2769  *
2770  * This function dumps the SW hp/tp values for the important rings.
2771  * HW hp/tp values are not being dumped, since it can lead to
2772  * READ NOC error when UMAC is in low power state. MCC does not have
2773  * device force wake working yet.
2774  *
2775  * Return: none
2776  */
2777 void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
2778 {
2779 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2780 	hal_soc_handle_t hal_soc = soc->hal_soc;
2781 	uint32_t hp, tp, i;
2782 
2783 	dp_info("SRNG HP-TP data:");
2784 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
2785 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
2786 				&tp, &hp);
2787 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2788 
2789 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
2790 		    INVALID_WBM_RING_NUM)
2791 			continue;
2792 
2793 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
2794 				&tp, &hp);
2795 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2796 	}
2797 
2798 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
2799 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
2800 				&tp, &hp);
2801 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2802 	}
2803 
2804 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
2805 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
2806 
2807 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
2808 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
2809 
2810 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
2811 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
2812 }
2813 
2814 /**
2815  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
2816  * @psoc: dp soc handle
2817  * @pdev_id: id of DP_PDEV handle
2818  * @pcp: pcp value
2819  * @tid: tid value passed by the user
2820  *
2821  * Return: QDF_STATUS_SUCCESS on success
2822  */
2823 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
2824 					 uint8_t pdev_id,
2825 					 uint8_t pcp, uint8_t tid)
2826 {
2827 	struct dp_soc *soc = (struct dp_soc *)psoc;
2828 
2829 	soc->pcp_tid_map[pcp] = tid;
2830 
2831 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
2832 	return QDF_STATUS_SUCCESS;
2833 }
2834 
2835 /**
2836  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
2837  * @soc_hdl: DP soc handle
2838  * @vdev_id: id of DP_VDEV handle
2839  * @pcp: pcp value
2840  * @tid: tid value passed by the user
2841  *
2842  * Return: QDF_STATUS_SUCCESS on success
2843  */
2844 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
2845 					 uint8_t vdev_id,
2846 					 uint8_t pcp, uint8_t tid)
2847 {
2848 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2849 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2850 						     DP_MOD_ID_CDP);
2851 
2852 	if (!vdev)
2853 		return QDF_STATUS_E_FAILURE;
2854 
2855 	vdev->pcp_tid_map[pcp] = tid;
2856 
2857 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2858 	return QDF_STATUS_SUCCESS;
2859 }
2860 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain the SRNGs by servicing every interrupt context
 * @soc_handle: CDP SOC handle
 *
 * Return: none
 */
void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;
	int cpu = dp_srng_get_cpu();

	/* Save the configured limits so they can be restored below */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		soc->arch_ops.dp_service_srngs(&soc->intr_ctx[i], budget, cpu);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u", val);
}
#endif
2894 
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: srng
 *
 * Only acts when the ring has a pending HAL_SRNG_FLUSH_EVENT; the
 * event is consumed (cleared) by the check.
 *
 * Return: None
 */
static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng)*/;
	}
}
2919 
2920 void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
2921 {
2922 	 uint8_t i;
2923 
2924 	if (force_flush_tx) {
2925 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2926 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
2927 					   HAL_SRNG_FLUSH_EVENT);
2928 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
2929 		}
2930 
2931 		return;
2932 	}
2933 
2934 	for (i = 0; i < soc->num_tcl_data_rings; i++)
2935 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
2936 
2937 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
2938 }
2939 #endif
2940 
2941 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2942 /*
2943  * dp_flush_tcl_ring() - flush TCL ring hp
2944  * @pdev: dp pdev
2945  * @ring_id: TCL ring id
2946  *
2947  * Return: 0 on success and error code on failure
2948  */
2949 int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
2950 {
2951 	struct dp_soc *soc = pdev->soc;
2952 	hal_ring_handle_t hal_ring_hdl =
2953 			soc->tcl_data_ring[ring_id].hal_srng;
2954 	int ret;
2955 
2956 	ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
2957 	if (ret)
2958 		return ret;
2959 
2960 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
2961 	if (ret) {
2962 		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
2963 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
2964 		hal_srng_inc_flush_cnt(hal_ring_hdl);
2965 		return ret;
2966 	}
2967 
2968 	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
2969 	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
2970 
2971 	return ret;
2972 }
2973 #else
2974 int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
2975 {
2976 	return QDF_STATUS_SUCCESS;
2977 }
2978 #endif
2979 
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 100

/**
 * dp_rx_hw_stats_cb() - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context (struct dp_req_rx_hw_stats_t *)
 * @reo_status: reo command response status
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	bool is_query_timeout;

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	/* Snapshot the flag: rx_hw_stats may be freed below */
	is_query_timeout = rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt if all pending tid stats query is received */
	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
		if (!is_query_timeout) {
			/* Wake the waiter in dp_request_rx_hw_stats() */
			qdf_event_set(&soc->rx_hw_stats_event);
			soc->is_last_stats_ctx_init = false;
		}

		qdf_mem_free(rx_hw_stats);
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d",
			queue_status->header.status);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		return;
	}

	/* After a timeout the requester restores its saved counters, so
	 * late results are intentionally dropped here.
	 */
	if (!is_query_timeout) {
		soc->ext_stats.rx_mpdu_received +=
					queue_status->mpdu_frms_cnt;
		soc->ext_stats.rx_mpdu_missed +=
					queue_status->hole_cnt;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
3026 
3027 /**
3028  * dp_request_rx_hw_stats() - request rx hardware stats
3029  * @soc_hdl: soc handle
3030  * @vdev_id: vdev id
3031  *
3032  * Return: None
3033  */
3034 QDF_STATUS
3035 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
3036 {
3037 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3038 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3039 						     DP_MOD_ID_CDP);
3040 	struct dp_peer *peer = NULL;
3041 	QDF_STATUS status;
3042 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
3043 	int rx_stats_sent_cnt = 0;
3044 	uint32_t last_rx_mpdu_received;
3045 	uint32_t last_rx_mpdu_missed;
3046 
3047 	if (!vdev) {
3048 		dp_err("vdev is null for vdev_id: %u", vdev_id);
3049 		status = QDF_STATUS_E_INVAL;
3050 		goto out;
3051 	}
3052 
3053 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
3054 
3055 	if (!peer) {
3056 		dp_err("Peer is NULL");
3057 		status = QDF_STATUS_E_INVAL;
3058 		goto out;
3059 	}
3060 
3061 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
3062 
3063 	if (!rx_hw_stats) {
3064 		dp_err("malloc failed for hw stats structure");
3065 		status = QDF_STATUS_E_INVAL;
3066 		goto out;
3067 	}
3068 
3069 	qdf_event_reset(&soc->rx_hw_stats_event);
3070 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
3071 	/* save the last soc cumulative stats and reset it to 0 */
3072 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
3073 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
3074 	soc->ext_stats.rx_mpdu_received = 0;
3075 	soc->ext_stats.rx_mpdu_missed = 0;
3076 
3077 	dp_debug("HW stats query start");
3078 	rx_stats_sent_cnt =
3079 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
3080 	if (!rx_stats_sent_cnt) {
3081 		dp_err("no tid stats sent successfully");
3082 		qdf_mem_free(rx_hw_stats);
3083 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3084 		status = QDF_STATUS_E_INVAL;
3085 		goto out;
3086 	}
3087 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
3088 		       rx_stats_sent_cnt);
3089 	rx_hw_stats->is_query_timeout = false;
3090 	soc->is_last_stats_ctx_init = true;
3091 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3092 
3093 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
3094 				       DP_REO_STATUS_STATS_TIMEOUT);
3095 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
3096 
3097 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
3098 	if (status != QDF_STATUS_SUCCESS) {
3099 		dp_info("partial rx hw stats event collected with %d",
3100 			qdf_atomic_read(
3101 				&rx_hw_stats->pending_tid_stats_cnt));
3102 		if (soc->is_last_stats_ctx_init)
3103 			rx_hw_stats->is_query_timeout = true;
3104 		/*
3105 		 * If query timeout happened, use the last saved stats
3106 		 * for this time query.
3107 		 */
3108 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
3109 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
3110 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
3111 
3112 	}
3113 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
3114 
3115 out:
3116 	if (peer)
3117 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3118 	if (vdev)
3119 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
3120 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
3121 
3122 	return status;
3123 }
3124 
3125 /**
3126  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
3127  * @soc_hdl: soc handle
3128  *
3129  * Return: None
3130  */
3131 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
3132 {
3133 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3134 
3135 	soc->ext_stats.rx_mpdu_received = 0;
3136 	soc->ext_stats.rx_mpdu_missed = 0;
3137 }
3138 #endif /* WLAN_FEATURE_STATS_EXT */
3139 
3140 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
3141 {
3142 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3143 
3144 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
3145 }
3146 
3147 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
3148 {
3149 	uint32_t i;
3150 
3151 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3152 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
3153 	}
3154 }
3155 
3156 qdf_export_symbol(dp_soc_set_txrx_ring_map);
3157 
3158 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
3159 {
3160 	dp_init_info("DP soc Dump for Target = %d", target_type);
3161 	dp_init_info("ast_override_support = %d da_war_enabled = %d",
3162 		     soc->ast_override_support, soc->da_war_enabled);
3163 
3164 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
3165 }
3166 
3167 /**
3168  * dp_soc_cfg_init() - initialize target specific configuration
3169  *		       during dp_soc_init
3170  * @soc: dp soc handle
3171  */
3172 static void dp_soc_cfg_init(struct dp_soc *soc)
3173 {
3174 	uint32_t target_type;
3175 
3176 	target_type = hal_get_target_type(soc->hal_soc);
3177 	switch (target_type) {
3178 	case TARGET_TYPE_QCA6290:
3179 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
3180 					       REO_DST_RING_SIZE_QCA6290);
3181 		soc->ast_override_support = 1;
3182 		soc->da_war_enabled = false;
3183 		break;
3184 	case TARGET_TYPE_QCA6390:
3185 	case TARGET_TYPE_QCA6490:
3186 	case TARGET_TYPE_QCA6750:
3187 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
3188 					       REO_DST_RING_SIZE_QCA6290);
3189 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
3190 		soc->ast_override_support = 1;
3191 		if (soc->cdp_soc.ol_ops->get_con_mode &&
3192 		    soc->cdp_soc.ol_ops->get_con_mode() ==
3193 		    QDF_GLOBAL_MONITOR_MODE) {
3194 			int int_ctx;
3195 
3196 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
3197 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
3198 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
3199 			}
3200 		}
3201 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
3202 		break;
3203 	case TARGET_TYPE_KIWI:
3204 	case TARGET_TYPE_MANGO:
3205 	case TARGET_TYPE_PEACH:
3206 		soc->ast_override_support = 1;
3207 		soc->per_tid_basize_max_tid = 8;
3208 
3209 		if (soc->cdp_soc.ol_ops->get_con_mode &&
3210 		    soc->cdp_soc.ol_ops->get_con_mode() ==
3211 		    QDF_GLOBAL_MONITOR_MODE) {
3212 			int int_ctx;
3213 
3214 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
3215 			     int_ctx++) {
3216 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
3217 				if (dp_is_monitor_mode_using_poll(soc))
3218 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
3219 			}
3220 		}
3221 
3222 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
3223 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
3224 		break;
3225 	case TARGET_TYPE_QCA8074:
3226 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
3227 		soc->da_war_enabled = true;
3228 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3229 		break;
3230 	case TARGET_TYPE_QCA8074V2:
3231 	case TARGET_TYPE_QCA6018:
3232 	case TARGET_TYPE_QCA9574:
3233 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3234 		soc->ast_override_support = 1;
3235 		soc->per_tid_basize_max_tid = 8;
3236 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3237 		soc->da_war_enabled = false;
3238 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3239 		break;
3240 	case TARGET_TYPE_QCN9000:
3241 		soc->ast_override_support = 1;
3242 		soc->da_war_enabled = false;
3243 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3244 		soc->per_tid_basize_max_tid = 8;
3245 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3246 		soc->lmac_polled_mode = 0;
3247 		soc->wbm_release_desc_rx_sg_support = 1;
3248 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
3249 		break;
3250 	case TARGET_TYPE_QCA5018:
3251 	case TARGET_TYPE_QCN6122:
3252 	case TARGET_TYPE_QCN9160:
3253 		soc->ast_override_support = 1;
3254 		soc->da_war_enabled = false;
3255 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3256 		soc->per_tid_basize_max_tid = 8;
3257 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
3258 		soc->disable_mac1_intr = 1;
3259 		soc->disable_mac2_intr = 1;
3260 		soc->wbm_release_desc_rx_sg_support = 1;
3261 		break;
3262 	case TARGET_TYPE_QCN9224:
3263 		soc->umac_reset_supported = true;
3264 		soc->ast_override_support = 1;
3265 		soc->da_war_enabled = false;
3266 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3267 		soc->per_tid_basize_max_tid = 8;
3268 		soc->wbm_release_desc_rx_sg_support = 1;
3269 		soc->rxdma2sw_rings_not_supported = 1;
3270 		soc->wbm_sg_last_msdu_war = 1;
3271 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
3272 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
3273 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
3274 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
3275 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
3276 						  CFG_DP_HOST_AST_DB_ENABLE);
3277 		soc->features.wds_ext_ast_override_enable = true;
3278 		break;
3279 	case TARGET_TYPE_QCA5332:
3280 	case TARGET_TYPE_QCN6432:
3281 		soc->umac_reset_supported = true;
3282 		soc->ast_override_support = 1;
3283 		soc->da_war_enabled = false;
3284 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
3285 		soc->per_tid_basize_max_tid = 8;
3286 		soc->wbm_release_desc_rx_sg_support = 1;
3287 		soc->rxdma2sw_rings_not_supported = 1;
3288 		soc->wbm_sg_last_msdu_war = 1;
3289 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
3290 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
3291 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
3292 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
3293 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
3294 						  CFG_DP_HOST_AST_DB_ENABLE);
3295 		soc->features.wds_ext_ast_override_enable = true;
3296 		break;
3297 	default:
3298 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
3299 		qdf_assert_always(0);
3300 		break;
3301 	}
3302 	dp_soc_cfg_dump(soc, target_type);
3303 }
3304 
3305 /**
3306  * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
3307  * @soc: Opaque DP SOC handle
3308  *
3309  * Return: none
3310  */
3311 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3312 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
3313 {
3314 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
3315 		soc->mld_mode_ap =
3316 		soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3317 					CDP_CFG_MLD_NETDEV_MODE_AP);
3318 	}
3319 	dp_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
3320 }
3321 #else
3322 static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
3323 {
3324 	(void)soc;
3325 }
3326 #endif
3327 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_soc_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_soc
 * @soc: Datapath soc handle
 *
 * Return: none
 */
static inline
void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
{
	/* Mirror the cfg knob for vdev-stats HW offload into the soc */
	soc->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
{
	/* HW offload of vdev stats compiled out: force-disable */
	soc->hw_txrx_stats_en = 0;
}
#endif
3348 
3349 /**
3350  * dp_soc_init() - Initialize txrx SOC
3351  * @soc: Opaque DP SOC handle
3352  * @htc_handle: Opaque HTC handle
3353  * @hif_handle: Opaque HIF handle
3354  *
3355  * Return: DP SOC handle on success, NULL on failure
3356  */
3357 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
3358 		  struct hif_opaque_softc *hif_handle)
3359 {
3360 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
3361 	bool is_monitor_mode = false;
3362 	uint8_t i;
3363 	int num_dp_msi;
3364 	bool ppeds_attached = false;
3365 
3366 	htt_soc = htt_soc_attach(soc, htc_handle);
3367 	if (!htt_soc)
3368 		goto fail1;
3369 
3370 	soc->htt_handle = htt_soc;
3371 
3372 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
3373 		goto fail2;
3374 
3375 	htt_set_htc_handle(htt_soc, htc_handle);
3376 
3377 	dp_soc_cfg_init(soc);
3378 
3379 	dp_monitor_soc_cfg_init(soc);
3380 	/* Reset/Initialize wbm sg list and flags */
3381 	dp_rx_wbm_sg_list_reset(soc);
3382 
3383 	/* Note: Any SRNG ring initialization should happen only after
3384 	 * Interrupt mode is set and followed by filling up the
3385 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
3386 	 */
3387 	dp_soc_set_interrupt_mode(soc);
3388 	if (soc->cdp_soc.ol_ops->get_con_mode &&
3389 	    soc->cdp_soc.ol_ops->get_con_mode() ==
3390 	    QDF_GLOBAL_MONITOR_MODE) {
3391 		is_monitor_mode = true;
3392 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
3393 	} else {
3394 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
3395 	}
3396 
3397 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
3398 	if (num_dp_msi < 0) {
3399 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
3400 		goto fail3;
3401 	}
3402 
3403 	if (soc->arch_ops.ppeds_handle_attached)
3404 		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
3405 
3406 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
3407 				     soc->intr_mode, is_monitor_mode,
3408 				     ppeds_attached,
3409 				     soc->umac_reset_supported);
3410 
3411 	/* initialize WBM_IDLE_LINK ring */
3412 	if (dp_hw_link_desc_ring_init(soc)) {
3413 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
3414 		goto fail3;
3415 	}
3416 
3417 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
3418 
3419 	if (dp_soc_srng_init(soc)) {
3420 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
3421 		goto fail4;
3422 	}
3423 
3424 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
3425 			       htt_get_htc_handle(htt_soc),
3426 			       soc->hal_soc, soc->osdev) == NULL)
3427 		goto fail5;
3428 
3429 	/* Initialize descriptors in TCL Rings */
3430 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3431 		hal_tx_init_data_ring(soc->hal_soc,
3432 				      soc->tcl_data_ring[i].hal_srng);
3433 	}
3434 
3435 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
3436 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
3437 		goto fail6;
3438 	}
3439 
3440 	if (soc->arch_ops.txrx_soc_ppeds_start) {
3441 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
3442 			dp_init_err("%pK: ppeds start failed", soc);
3443 			goto fail7;
3444 		}
3445 	}
3446 
3447 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
3448 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
3449 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3450 	wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
3451 			   cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
3452 #endif
3453 	soc->cce_disable = false;
3454 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
3455 
3456 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
3457 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
3458 	qdf_spinlock_create(&soc->vdev_map_lock);
3459 	qdf_atomic_init(&soc->num_tx_outstanding);
3460 	qdf_atomic_init(&soc->num_tx_exception);
3461 	soc->num_tx_allowed =
3462 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
3463 	soc->num_tx_spl_allowed =
3464 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
3465 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
3466 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
3467 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3468 				CDP_CFG_MAX_PEER_ID);
3469 
3470 		if (ret != -EINVAL)
3471 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
3472 
3473 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
3474 				CDP_CFG_CCE_DISABLE);
3475 		if (ret == 1)
3476 			soc->cce_disable = true;
3477 	}
3478 
3479 	/*
3480 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
3481 	 * and IPQ5018 WMAC2 is not there in these platforms.
3482 	 */
3483 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
3484 	    soc->disable_mac2_intr)
3485 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
3486 
3487 	/*
3488 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
3489 	 * WMAC1 is not there in this platform.
3490 	 */
3491 	if (soc->disable_mac1_intr)
3492 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
3493 
3494 	/* setup the global rx defrag waitlist */
3495 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3496 	soc->rx.defrag.timeout_ms =
3497 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
3498 	soc->rx.defrag.next_flush_ms = 0;
3499 	soc->rx.flags.defrag_timeout_check =
3500 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
3501 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3502 
3503 	dp_monitor_soc_init(soc);
3504 
3505 	qdf_atomic_set(&soc->cmn_init_done, 1);
3506 
3507 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3508 
3509 	qdf_spinlock_create(&soc->ast_lock);
3510 	dp_peer_mec_spinlock_create(soc);
3511 
3512 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
3513 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
3514 	INIT_RX_HW_STATS_LOCK(soc);
3515 
3516 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
3517 	/* fill the tx/rx cpu ring map*/
3518 	dp_soc_set_txrx_ring_map(soc);
3519 
3520 	TAILQ_INIT(&soc->inactive_peer_list);
3521 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
3522 	TAILQ_INIT(&soc->inactive_vdev_list);
3523 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
3524 	qdf_spinlock_create(&soc->htt_stats.lock);
3525 	/* initialize work queue for stats processing */
3526 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3527 
3528 	dp_reo_desc_deferred_freelist_create(soc);
3529 
3530 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
3531 		qdf_dma_mem_stats_read(),
3532 		qdf_heap_mem_stats_read(),
3533 		qdf_skb_total_mem_stats_read());
3534 
3535 	soc->vdev_stats_id_map = 0;
3536 
3537 	dp_soc_hw_txrx_stats_init(soc);
3538 
3539 	dp_soc_get_ap_mld_mode(soc);
3540 
3541 	return soc;
3542 fail7:
3543 	dp_soc_tx_desc_sw_pools_deinit(soc);
3544 fail6:
3545 	htt_soc_htc_dealloc(soc->htt_handle);
3546 fail5:
3547 	dp_soc_srng_deinit(soc);
3548 fail4:
3549 	dp_hw_link_desc_ring_deinit(soc);
3550 fail3:
3551 	htt_htc_pkt_pool_free(htt_soc);
3552 fail2:
3553 	htt_soc_detach(htt_soc);
3554 fail1:
3555 	return NULL;
3556 }
3557 
3558 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
3559 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
3560 {
3561 	QDF_STATUS status;
3562 
3563 	if (soc->init_tcl_cmd_cred_ring) {
3564 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
3565 				       TCL_CMD_CREDIT, 0, 0);
3566 		if (QDF_IS_STATUS_ERROR(status))
3567 			return status;
3568 
3569 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3570 				  soc->tcl_cmd_credit_ring.alloc_size,
3571 				  soc->ctrl_psoc,
3572 				  WLAN_MD_DP_SRNG_TCL_CMD,
3573 				  "wbm_desc_rel_ring");
3574 	}
3575 
3576 	return QDF_STATUS_SUCCESS;
3577 }
3578 
3579 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
3580 {
3581 	if (soc->init_tcl_cmd_cred_ring) {
3582 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3583 				     soc->tcl_cmd_credit_ring.alloc_size,
3584 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
3585 				     "wbm_desc_rel_ring");
3586 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
3587 			       TCL_CMD_CREDIT, 0);
3588 	}
3589 }
3590 
3591 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
3592 {
3593 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3594 	uint32_t entries;
3595 	QDF_STATUS status;
3596 
3597 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
3598 	if (soc->init_tcl_cmd_cred_ring) {
3599 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
3600 				       TCL_CMD_CREDIT, entries, 0);
3601 		if (QDF_IS_STATUS_ERROR(status))
3602 			return status;
3603 	}
3604 
3605 	return QDF_STATUS_SUCCESS;
3606 }
3607 
3608 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
3609 {
3610 	if (soc->init_tcl_cmd_cred_ring)
3611 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
3612 }
3613 
3614 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
3615 {
3616 	if (soc->init_tcl_cmd_cred_ring)
3617 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
3618 					    soc->tcl_cmd_credit_ring.hal_srng);
3619 }
#else
/* TCL command/credit ring compiled out
 * (WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG): provide no-op stubs so callers
 * need no conditional code.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
#endif
3643 
3644 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
3645 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
3646 {
3647 	QDF_STATUS status;
3648 
3649 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
3650 	if (QDF_IS_STATUS_ERROR(status))
3651 		return status;
3652 
3653 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
3654 			  soc->tcl_status_ring.alloc_size,
3655 			  soc->ctrl_psoc,
3656 			  WLAN_MD_DP_SRNG_TCL_STATUS,
3657 			  "wbm_desc_rel_ring");
3658 
3659 	return QDF_STATUS_SUCCESS;
3660 }
3661 
3662 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
3663 {
3664 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
3665 			     soc->tcl_status_ring.alloc_size,
3666 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
3667 			     "wbm_desc_rel_ring");
3668 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3669 }
3670 
3671 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
3672 {
3673 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3674 	uint32_t entries;
3675 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3676 
3677 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
3678 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
3679 			       TCL_STATUS, entries, 0);
3680 
3681 	return status;
3682 }
3683 
3684 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
3685 {
3686 	dp_srng_free(soc, &soc->tcl_status_ring);
3687 }
#else
/* TCL status ring compiled out (WLAN_DP_DISABLE_TCL_STATUS_SRNG):
 * provide no-op stubs so callers need no conditional code.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
#endif
3707 
3708 /**
3709  * dp_soc_srng_deinit() - de-initialize soc srng rings
3710  * @soc: Datapath soc handle
3711  *
3712  */
3713 void dp_soc_srng_deinit(struct dp_soc *soc)
3714 {
3715 	uint32_t i;
3716 
3717 	if (soc->arch_ops.txrx_soc_srng_deinit)
3718 		soc->arch_ops.txrx_soc_srng_deinit(soc);
3719 
3720 	/* Free the ring memories */
3721 	/* Common rings */
3722 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3723 			     soc->wbm_desc_rel_ring.alloc_size,
3724 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
3725 			     "wbm_desc_rel_ring");
3726 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3727 
3728 	/* Tx data rings */
3729 	for (i = 0; i < soc->num_tcl_data_rings; i++)
3730 		dp_deinit_tx_pair_by_index(soc, i);
3731 
3732 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3733 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
3734 		dp_ipa_deinit_alt_tx_ring(soc);
3735 	}
3736 
3737 	/* TCL command and status rings */
3738 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
3739 	dp_soc_tcl_status_srng_deinit(soc);
3740 
3741 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3742 		/* TODO: Get number of rings and ring sizes
3743 		 * from wlan_cfg
3744 		 */
3745 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
3746 				     soc->reo_dest_ring[i].alloc_size,
3747 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
3748 				     "reo_dest_ring");
3749 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
3750 	}
3751 
3752 	/* REO reinjection ring */
3753 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
3754 			     soc->reo_reinject_ring.alloc_size,
3755 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
3756 			     "reo_reinject_ring");
3757 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3758 
3759 	/* Rx release ring */
3760 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
3761 			     soc->rx_rel_ring.alloc_size,
3762 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
3763 			     "reo_release_ring");
3764 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3765 
3766 	/* Rx exception ring */
3767 	/* TODO: Better to store ring_type and ring_num in
3768 	 * dp_srng during setup
3769 	 */
3770 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
3771 			     soc->reo_exception_ring.alloc_size,
3772 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
3773 			     "reo_exception_ring");
3774 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3775 
3776 	/* REO command and status rings */
3777 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
3778 			     soc->reo_cmd_ring.alloc_size,
3779 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
3780 			     "reo_cmd_ring");
3781 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3782 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
3783 			     soc->reo_status_ring.alloc_size,
3784 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
3785 			     "reo_status_ring");
3786 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3787 }
3788 
3789 /**
3790  * dp_soc_srng_init() - Initialize soc level srng rings
3791  * @soc: Datapath soc handle
3792  *
3793  * Return: QDF_STATUS_SUCCESS on success
3794  *	   QDF_STATUS_E_FAILURE on failure
3795  */
3796 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
3797 {
3798 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3799 	uint8_t i;
3800 	uint8_t wbm2_sw_rx_rel_ring_id;
3801 
3802 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3803 
3804 	dp_enable_verbose_debug(soc);
3805 
3806 	/* WBM descriptor release ring */
3807 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
3808 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
3809 		goto fail1;
3810 	}
3811 
3812 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3813 			  soc->wbm_desc_rel_ring.alloc_size,
3814 			  soc->ctrl_psoc,
3815 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
3816 			  "wbm_desc_rel_ring");
3817 
3818 	/* TCL command and status rings */
3819 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
3820 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
3821 		goto fail1;
3822 	}
3823 
3824 	if (dp_soc_tcl_status_srng_init(soc)) {
3825 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
3826 		goto fail1;
3827 	}
3828 
3829 	/* REO reinjection ring */
3830 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
3831 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
3832 		goto fail1;
3833 	}
3834 
3835 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
3836 			  soc->reo_reinject_ring.alloc_size,
3837 			  soc->ctrl_psoc,
3838 			  WLAN_MD_DP_SRNG_REO_REINJECT,
3839 			  "reo_reinject_ring");
3840 
3841 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
3842 	/* Rx release ring */
3843 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
3844 			 wbm2_sw_rx_rel_ring_id, 0)) {
3845 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
3846 		goto fail1;
3847 	}
3848 
3849 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
3850 			  soc->rx_rel_ring.alloc_size,
3851 			  soc->ctrl_psoc,
3852 			  WLAN_MD_DP_SRNG_RX_REL,
3853 			  "reo_release_ring");
3854 
3855 	/* Rx exception ring */
3856 	if (dp_srng_init(soc, &soc->reo_exception_ring,
3857 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
3858 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
3859 		goto fail1;
3860 	}
3861 
3862 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
3863 			  soc->reo_exception_ring.alloc_size,
3864 			  soc->ctrl_psoc,
3865 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
3866 			  "reo_exception_ring");
3867 
3868 	/* REO command and status rings */
3869 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
3870 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
3871 		goto fail1;
3872 	}
3873 
3874 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
3875 			  soc->reo_cmd_ring.alloc_size,
3876 			  soc->ctrl_psoc,
3877 			  WLAN_MD_DP_SRNG_REO_CMD,
3878 			  "reo_cmd_ring");
3879 
3880 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
3881 	TAILQ_INIT(&soc->rx.reo_cmd_list);
3882 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
3883 
3884 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
3885 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
3886 		goto fail1;
3887 	}
3888 
3889 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
3890 			  soc->reo_status_ring.alloc_size,
3891 			  soc->ctrl_psoc,
3892 			  WLAN_MD_DP_SRNG_REO_STATUS,
3893 			  "reo_status_ring");
3894 
3895 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
3896 		if (dp_init_tx_ring_pair_by_index(soc, i))
3897 			goto fail1;
3898 	}
3899 
3900 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3901 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
3902 			goto fail1;
3903 
3904 		if (dp_ipa_init_alt_tx_ring(soc))
3905 			goto fail1;
3906 	}
3907 
3908 	dp_create_ext_stats_event(soc);
3909 
3910 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3911 		/* Initialize REO destination ring */
3912 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
3913 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
3914 			goto fail1;
3915 		}
3916 
3917 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
3918 				  soc->reo_dest_ring[i].alloc_size,
3919 				  soc->ctrl_psoc,
3920 				  WLAN_MD_DP_SRNG_REO_DEST,
3921 				  "reo_dest_ring");
3922 	}
3923 
3924 	if (soc->arch_ops.txrx_soc_srng_init) {
3925 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
3926 			dp_init_err("%pK: dp_srng_init failed for arch rings",
3927 				    soc);
3928 			goto fail1;
3929 		}
3930 	}
3931 
3932 	return QDF_STATUS_SUCCESS;
3933 fail1:
3934 	/*
3935 	 * Cleanup will be done as part of soc_detach, which will
3936 	 * be called on pdev attach failure
3937 	 */
3938 	dp_soc_srng_deinit(soc);
3939 	return QDF_STATUS_E_FAILURE;
3940 }
3941 
3942 /**
3943  * dp_soc_srng_free() - free soc level srng rings
3944  * @soc: Datapath soc handle
3945  *
3946  */
3947 void dp_soc_srng_free(struct dp_soc *soc)
3948 {
3949 	uint32_t i;
3950 
3951 	if (soc->arch_ops.txrx_soc_srng_free)
3952 		soc->arch_ops.txrx_soc_srng_free(soc);
3953 
3954 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
3955 
3956 	for (i = 0; i < soc->num_tcl_data_rings; i++)
3957 		dp_free_tx_ring_pair_by_index(soc, i);
3958 
3959 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
3960 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3961 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
3962 		dp_ipa_free_alt_tx_ring(soc);
3963 	}
3964 
3965 	dp_soc_tcl_cmd_cred_srng_free(soc);
3966 	dp_soc_tcl_status_srng_free(soc);
3967 
3968 	for (i = 0; i < soc->num_reo_dest_rings; i++)
3969 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
3970 
3971 	dp_srng_free(soc, &soc->reo_reinject_ring);
3972 	dp_srng_free(soc, &soc->rx_rel_ring);
3973 
3974 	dp_srng_free(soc, &soc->reo_exception_ring);
3975 
3976 	dp_srng_free(soc, &soc->reo_cmd_ring);
3977 	dp_srng_free(soc, &soc->reo_status_ring);
3978 }
3979 
3980 /**
3981  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
3982  * @soc: Datapath soc handle
3983  *
3984  * Return: QDF_STATUS_SUCCESS on success
3985  *	   QDF_STATUS_E_NOMEM on failure
3986  */
3987 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
3988 {
3989 	uint32_t entries;
3990 	uint32_t i;
3991 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3992 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
3993 	uint32_t reo_dst_ring_size;
3994 
3995 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3996 
3997 	/* sw2wbm link descriptor release ring */
3998 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
3999 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
4000 			  entries, 0)) {
4001 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
4002 		goto fail1;
4003 	}
4004 
4005 	/* TCL command and status rings */
4006 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
4007 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
4008 		goto fail1;
4009 	}
4010 
4011 	if (dp_soc_tcl_status_srng_alloc(soc)) {
4012 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
4013 		goto fail1;
4014 	}
4015 
4016 	/* REO reinjection ring */
4017 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
4018 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
4019 			  entries, 0)) {
4020 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
4021 		goto fail1;
4022 	}
4023 
4024 	/* Rx release ring */
4025 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
4026 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4027 			  entries, 0)) {
4028 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
4029 		goto fail1;
4030 	}
4031 
4032 	/* Rx exception ring */
4033 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
4034 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
4035 			  entries, 0)) {
4036 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
4037 		goto fail1;
4038 	}
4039 
4040 	/* REO command and status rings */
4041 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
4042 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
4043 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
4044 		goto fail1;
4045 	}
4046 
4047 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
4048 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
4049 			  entries, 0)) {
4050 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
4051 		goto fail1;
4052 	}
4053 
4054 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
4055 
4056 	/* Disable cached desc if NSS offload is enabled */
4057 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4058 		cached = 0;
4059 
4060 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4061 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
4062 			goto fail1;
4063 	}
4064 
4065 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
4066 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4067 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4068 			goto fail1;
4069 
4070 		if (dp_ipa_alloc_alt_tx_ring(soc))
4071 			goto fail1;
4072 	}
4073 
4074 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4075 		/* Setup REO destination ring */
4076 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
4077 				  reo_dst_ring_size, cached)) {
4078 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
4079 			goto fail1;
4080 		}
4081 	}
4082 
4083 	if (soc->arch_ops.txrx_soc_srng_alloc) {
4084 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
4085 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
4086 				    soc);
4087 			goto fail1;
4088 		}
4089 	}
4090 
4091 	return QDF_STATUS_SUCCESS;
4092 
4093 fail1:
4094 	dp_soc_srng_free(soc);
4095 	return QDF_STATUS_E_NOMEM;
4096 }
4097 
4098 /**
4099  * dp_soc_cfg_attach() - set target specific configuration in
4100  *			 dp soc cfg.
4101  * @soc: dp soc handle
4102  */
4103 void dp_soc_cfg_attach(struct dp_soc *soc)
4104 {
4105 	int target_type;
4106 	int nss_cfg = 0;
4107 
4108 	target_type = hal_get_target_type(soc->hal_soc);
4109 	switch (target_type) {
4110 	case TARGET_TYPE_QCA6290:
4111 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4112 					       REO_DST_RING_SIZE_QCA6290);
4113 		break;
4114 	case TARGET_TYPE_QCA6390:
4115 	case TARGET_TYPE_QCA6490:
4116 	case TARGET_TYPE_QCA6750:
4117 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4118 					       REO_DST_RING_SIZE_QCA6290);
4119 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4120 		break;
4121 	case TARGET_TYPE_KIWI:
4122 	case TARGET_TYPE_MANGO:
4123 	case TARGET_TYPE_PEACH:
4124 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4125 		break;
4126 	case TARGET_TYPE_QCA8074:
4127 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4128 		break;
4129 	case TARGET_TYPE_QCA8074V2:
4130 	case TARGET_TYPE_QCA6018:
4131 	case TARGET_TYPE_QCA9574:
4132 	case TARGET_TYPE_QCN6122:
4133 	case TARGET_TYPE_QCA5018:
4134 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4135 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4136 		break;
4137 	case TARGET_TYPE_QCN9160:
4138 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4139 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4140 		break;
4141 	case TARGET_TYPE_QCN9000:
4142 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4143 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4144 		break;
4145 	case TARGET_TYPE_QCN9224:
4146 	case TARGET_TYPE_QCA5332:
4147 	case TARGET_TYPE_QCN6432:
4148 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4149 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4150 		break;
4151 	default:
4152 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4153 		qdf_assert_always(0);
4154 		break;
4155 	}
4156 
4157 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
4158 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
4159 
4160 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
4161 
4162 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
4163 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
4164 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
4165 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
4166 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
4167 		soc->init_tcl_cmd_cred_ring = false;
4168 		soc->num_tcl_data_rings =
4169 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
4170 		soc->num_reo_dest_rings =
4171 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
4172 
4173 	} else {
4174 		soc->init_tcl_cmd_cred_ring = true;
4175 		soc->num_tx_comp_rings =
4176 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
4177 		soc->num_tcl_data_rings =
4178 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
4179 		soc->num_reo_dest_rings =
4180 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4181 	}
4182 
4183 }
4184 
4185 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
4186 {
4187 	struct dp_soc *soc = pdev->soc;
4188 
4189 	switch (pdev->pdev_id) {
4190 	case 0:
4191 		pdev->reo_dest =
4192 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
4193 		break;
4194 
4195 	case 1:
4196 		pdev->reo_dest =
4197 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
4198 		break;
4199 
4200 	case 2:
4201 		pdev->reo_dest =
4202 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
4203 		break;
4204 
4205 	default:
4206 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
4207 			    soc, pdev->pdev_id);
4208 		break;
4209 	}
4210 }
4211 
4212