1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_rings.h"
34 #include "dp_internal.h"
35 #include "dp_tx.h"
36 #include "dp_tx_desc.h"
37 #include "dp_rx.h"
38 #ifdef DP_RATETABLE_SUPPORT
39 #include "dp_ratetable.h"
40 #endif
41 #include <cdp_txrx_handle.h>
42 #include <wlan_cfg.h>
43 #include <wlan_utility.h>
44 #include "cdp_txrx_cmn_struct.h"
45 #include "cdp_txrx_stats_struct.h"
46 #include "cdp_txrx_cmn_reg.h"
47 #include <qdf_util.h>
48 #include "dp_peer.h"
49 #include "htt_stats.h"
50 #include "dp_htt.h"
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 #include <wlan_module_ids.h>
55 
56 #ifdef WIFI_MONITOR_SUPPORT
57 #include <dp_mon.h>
58 #endif
59 #include "qdf_ssr_driver_dump.h"
60 
#ifdef WLAN_FEATURE_STATS_EXT
/*
 * Create/destroy the soc rx_hw_stats_lock spinlock.
 * NOTE(review): presumably this lock serializes asynchronous HW rx stats
 * requests issued by the STATS_EXT feature — confirm against the users of
 * rx_hw_stats_lock. When the feature is compiled out the lock is not
 * needed, so the macros collapse to no-ops.
 */
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif
70 
/*
 * Forward declarations for the per-index TX ring pair
 * alloc/init/deinit/free helpers defined later in this file.
 */
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index);
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index);
77 
78 /* default_dscp_tid_map - Default DSCP-TID mapping
79  *
80  * DSCP        TID
81  * 000000      0
82  * 001000      1
83  * 010000      2
84  * 011000      3
85  * 100000      4
86  * 101000      5
87  * 110000      6
88  * 111000      7
89  */
/* Indexed by the 6-bit DSCP value: each run of 8 consecutive DSCP codes
 * (i.e. the top 3 bits of the DSCP) maps to one TID, per the table above.
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
100 
101 /* default_pcp_tid_map - Default PCP-TID mapping
102  *
103  * PCP     TID
104  * 000      0
105  * 001      1
106  * 010      2
107  * 011      3
108  * 100      4
109  * 101      5
110  * 110      6
111  * 111      7
112  */
/* Identity mapping: PCP value n maps directly to TID n */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
116 
/*
 * dp_cpu_ring_map - mapping from interrupt context index (column) to a
 * ring id (value) for each cpu-ring-map mode (row).
 * NOTE(review): each row appears to correspond to one DP_NSS_* cpu ring
 * map mode (DP_NSS_CPU_RING_MAP_MAX rows); the final row is only present
 * when TX packet capture is enabled — confirm row semantics against the
 * nss cfg enum in wlan_cfg.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
130 
131 /**
132  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
133  * @soc: DP soc handle
134  * @ring_type: ring type
135  * @ring_num: ring_num
136  *
137  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
138  */
dp_soc_ring_if_nss_offloaded(struct dp_soc * soc,enum hal_ring_type ring_type,int ring_num)139 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
140 					    enum hal_ring_type ring_type,
141 					    int ring_num)
142 {
143 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
144 	uint8_t status = 0;
145 
146 	switch (ring_type) {
147 	case WBM2SW_RELEASE:
148 	case REO_DST:
149 	case RXDMA_BUF:
150 	case REO_EXCEPTION:
151 		status = ((nss_config) & (1 << ring_num));
152 		break;
153 	default:
154 		break;
155 	}
156 
157 	return status;
158 }
159 
160 #if !defined(DP_CON_MON)
dp_soc_reset_mon_intr_mask(struct dp_soc * soc)161 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
162 {
163 	int i;
164 
165 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
166 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
167 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
168 	}
169 }
170 
171 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
172 
dp_service_lmac_rings(void * arg)173 void dp_service_lmac_rings(void *arg)
174 {
175 	struct dp_soc *soc = (struct dp_soc *)arg;
176 	int ring = 0, i;
177 	struct dp_pdev *pdev = NULL;
178 	union dp_rx_desc_list_elem_t *desc_list = NULL;
179 	union dp_rx_desc_list_elem_t *tail = NULL;
180 
181 	/* Process LMAC interrupts */
182 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
183 		int mac_for_pdev = ring;
184 		struct dp_srng *rx_refill_buf_ring;
185 
186 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
187 		if (!pdev)
188 			continue;
189 
190 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
191 
192 		dp_monitor_process(soc, NULL, mac_for_pdev,
193 				   QCA_NAPI_BUDGET);
194 
195 		for (i = 0;
196 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
197 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
198 					     mac_for_pdev,
199 					     QCA_NAPI_BUDGET);
200 
201 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
202 						  mac_for_pdev))
203 			dp_rx_buffers_replenish(soc, mac_for_pdev,
204 						rx_refill_buf_ring,
205 						&soc->rx_desc_buf[mac_for_pdev],
206 						0, &desc_list, &tail, false);
207 	}
208 
209 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
210 }
211 
212 #endif
213 
214 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
215 /**
216  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
217  *				rx_near_full_grp1 mask
218  * @soc: Datapath SoC Handle
219  * @ring_num: REO ring number
220  *
221  * Return: 1 if the ring_num belongs to reo_nf_grp1,
222  *	   0, otherwise.
223  */
224 static inline int
dp_is_reo_ring_num_in_nf_grp1(struct dp_soc * soc,int ring_num)225 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
226 {
227 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
228 }
229 
230 /**
231  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
232  *				rx_near_full_grp2 mask
233  * @soc: Datapath SoC Handle
234  * @ring_num: REO ring number
235  *
236  * Return: 1 if the ring_num belongs to reo_nf_grp2,
237  *	   0, otherwise.
238  */
239 static inline int
dp_is_reo_ring_num_in_nf_grp2(struct dp_soc * soc,int ring_num)240 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
241 {
242 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
243 }
244 
245 /**
246  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
247  *				ring type and number
248  * @soc: Datapath SoC handle
249  * @ring_type: SRNG type
250  * @ring_num: ring num
251  *
252  * Return: near-full irq mask pointer
253  */
dp_srng_get_near_full_irq_mask(struct dp_soc * soc,enum hal_ring_type ring_type,int ring_num)254 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
255 					enum hal_ring_type ring_type,
256 					int ring_num)
257 {
258 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
259 	uint8_t wbm2_sw_rx_rel_ring_id;
260 	uint8_t *nf_irq_mask = NULL;
261 
262 	switch (ring_type) {
263 	case WBM2SW_RELEASE:
264 		wbm2_sw_rx_rel_ring_id =
265 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
266 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
267 			nf_irq_mask = &soc->wlan_cfg_ctx->
268 					int_tx_ring_near_full_irq_mask[0];
269 		}
270 		break;
271 	case REO_DST:
272 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
273 			nf_irq_mask =
274 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
275 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
276 			nf_irq_mask =
277 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
278 		else
279 			qdf_assert(0);
280 		break;
281 	default:
282 		break;
283 	}
284 
285 	return nf_irq_mask;
286 }
287 
288 /**
289  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
290  * @soc: Datapath SoC handle
291  * @ring_params: srng params handle
292  * @msi2_addr: MSI2 addr to be set for the SRNG
293  * @msi2_data: MSI2 data to be set for the SRNG
294  *
295  * Return: None
296  */
dp_srng_set_msi2_ring_params(struct dp_soc * soc,struct hal_srng_params * ring_params,qdf_dma_addr_t msi2_addr,uint32_t msi2_data)297 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
298 				  struct hal_srng_params *ring_params,
299 				  qdf_dma_addr_t msi2_addr,
300 				  uint32_t msi2_data)
301 {
302 	ring_params->msi2_addr = msi2_addr;
303 	ring_params->msi2_data = msi2_data;
304 }
305 
306 /**
307  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
308  * @soc: Datapath SoC handle
309  * @ring_params: ring_params for SRNG
310  * @ring_type: SENG type
311  * @ring_num: ring number for the SRNG
312  * @nf_msi_grp_num: near full msi group number
313  *
314  * Return: None
315  */
dp_srng_msi2_setup(struct dp_soc * soc,struct hal_srng_params * ring_params,int ring_type,int ring_num,int nf_msi_grp_num)316 void dp_srng_msi2_setup(struct dp_soc *soc,
317 			struct hal_srng_params *ring_params,
318 			int ring_type, int ring_num, int nf_msi_grp_num)
319 {
320 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
321 	int msi_data_count, ret;
322 
323 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
324 					  &msi_data_count, &msi_data_start,
325 					  &msi_irq_start);
326 	if (ret)
327 		return;
328 
329 	if (nf_msi_grp_num < 0) {
330 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
331 			     soc, ring_type, ring_num);
332 		ring_params->msi2_addr = 0;
333 		ring_params->msi2_data = 0;
334 		return;
335 	}
336 
337 	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
338 					   msi_data_count)) {
339 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
340 			     soc, nf_msi_grp_num);
341 		QDF_ASSERT(0);
342 	}
343 
344 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
345 
346 	ring_params->nf_irq_support = 1;
347 	ring_params->msi2_addr = addr_low;
348 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
349 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
350 		+ msi_data_start;
351 	ring_params->flags |= HAL_SRNG_MSI_INTR;
352 }
353 
354 /* Percentage of ring entries considered as nearly full */
355 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
356 /* Percentage of ring entries considered as critically full */
357 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
358 /* Percentage of ring entries considered as safe threshold */
359 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
360 
361 /**
362  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
363  *			near full irq
364  * @soc: Datapath SoC handle
365  * @ring_params: ring params for SRNG
366  * @ring_type: ring type
367  */
368 void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc * soc,struct hal_srng_params * ring_params,int ring_type)369 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
370 					  struct hal_srng_params *ring_params,
371 					  int ring_type)
372 {
373 	if (ring_params->nf_irq_support) {
374 		ring_params->high_thresh = (ring_params->num_entries *
375 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
376 		ring_params->crit_thresh = (ring_params->num_entries *
377 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
378 		ring_params->safe_thresh = (ring_params->num_entries *
379 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
380 	}
381 }
382 
383 /**
384  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
385  *			structure from the ring params
386  * @soc: Datapath SoC handle
387  * @srng: SRNG handle
388  * @ring_params: ring params for a SRNG
389  *
390  * Return: None
391  */
392 static inline void
dp_srng_set_nf_thresholds(struct dp_soc * soc,struct dp_srng * srng,struct hal_srng_params * ring_params)393 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
394 			  struct hal_srng_params *ring_params)
395 {
396 	srng->crit_thresh = ring_params->crit_thresh;
397 	srng->safe_thresh = ring_params->safe_thresh;
398 }
399 
400 #else
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	/* Near-full IRQ feature compiled out: no thresholds to propagate */
}
406 #endif
407 
408 /**
409  * dp_get_num_msi_available()- API to get number of MSIs available
410  * @soc: DP soc Handle
411  * @interrupt_mode: Mode of interrupts
412  *
413  * Return: Number of MSIs available or 0 in case of integrated
414  */
415 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	/* Single-pdev (integrated) targets: no MSIs, per the API contract */
	return 0;
}
420 #else
dp_get_num_msi_available(struct dp_soc * soc,int interrupt_mode)421 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
422 {
423 	int msi_data_count;
424 	int msi_data_start;
425 	int msi_irq_start;
426 	int ret;
427 
428 	if (interrupt_mode == DP_INTR_INTEGRATED) {
429 		return 0;
430 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
431 		   DP_INTR_POLL) {
432 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
433 						  &msi_data_count,
434 						  &msi_data_start,
435 						  &msi_irq_start);
436 		if (ret) {
437 			qdf_err("Unable to get DP MSI assignment %d",
438 				interrupt_mode);
439 			return -EINVAL;
440 		}
441 		return msi_data_count;
442 	}
443 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
444 	return -EINVAL;
445 }
446 #endif
447 
448 /**
449  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
450  * update threshold value from wlan_cfg_ctx
451  * @soc: device handle
452  * @ring_params: per ring specific parameters
453  * @ring_type: Ring type
454  * @ring_num: Ring number for a given ring type
455  * @num_entries: number of entries to fill
456  *
457  * Fill the ring params with the pointer update threshold
458  * configuration parameters available in wlan_cfg_ctx
459  *
460  * Return: None
461  */
462 static void
dp_srng_configure_pointer_update_thresholds(struct dp_soc * soc,struct hal_srng_params * ring_params,int ring_type,int ring_num,int num_entries)463 dp_srng_configure_pointer_update_thresholds(
464 				struct dp_soc *soc,
465 				struct hal_srng_params *ring_params,
466 				int ring_type, int ring_num,
467 				int num_entries)
468 {
469 	if (ring_type == REO_DST) {
470 		ring_params->pointer_timer_threshold =
471 			wlan_cfg_get_pointer_timer_threshold_rx(
472 						soc->wlan_cfg_ctx);
473 		ring_params->pointer_num_threshold =
474 			wlan_cfg_get_pointer_num_threshold_rx(
475 						soc->wlan_cfg_ctx);
476 	}
477 }
478 
/**
 * dp_srng_init_idx() - Initialize an already-allocated SRNG with HAL,
 *			starting from a given index
 * @soc: Datapath SoC handle
 * @srng: SRNG descriptor (memory must already be allocated)
 * @ring_type: HAL ring type
 * @ring_num: ring number within the type
 * @mac_id: mac id for per-mac rings
 * @idx: initial head/tail index to program into the ring
 *
 * Zeroes the ring memory, builds the hal_srng_params (base addresses,
 * MSI/MSI2 routing, interrupt and pointer-update thresholds) and hands
 * the ring to HAL via hal_srng_setup_idx().
 *
 * Return: QDF_STATUS_SUCCESS (also when the ring was already initialized),
 *	   QDF_STATUS_E_FAILURE if HAL setup fails (ring memory is freed).
 */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Double-init is logged but treated as success so callers that
	 * re-run init paths are not failed.
	 */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		/* Not MSI (or explicitly skipped): clear both MSI vectors */
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
						    ring_type, ring_num,
						    srng->num_entries);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);

	/* HAL setup failed: release the backing memory so the caller does
	 * not have to distinguish the two failure states.
	 */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_srng_init_idx);
548 
549 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
550 /**
551  * dp_service_near_full_srngs() - Bottom half handler to process the near
552  *				full IRQ on a SRNG
553  * @dp_ctx: Datapath SoC handle
554  * @dp_budget: Number of SRNGs which can be processed in a single attempt
555  *		without rescheduling
556  * @cpu: cpu id
557  *
558  * Return: remaining budget/quota for the soc device
559  */
560 static
dp_service_near_full_srngs(void * dp_ctx,uint32_t dp_budget,int cpu)561 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
562 {
563 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
564 	struct dp_soc *soc = int_ctx->soc;
565 
566 	/*
567 	 * dp_service_near_full_srngs arch ops should be initialized always
568 	 * if the NEAR FULL IRQ feature is enabled.
569 	 */
570 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
571 							dp_budget);
572 }
573 #endif
574 
575 #ifndef QCA_HOST_MODE_WIFI_DISABLED
576 
/**
 * dp_service_srngs() - Top level interrupt service routine for all rings
 *			mapped to one DP interrupt context
 * @dp_ctx: DP interrupt context (struct dp_intr *) registered with HIF
 * @dp_budget: total number of ring entries that may be processed in this pass
 * @cpu: CPU id on which the bottom half is running
 *
 * Services, in priority order: Tx completion rings, the REO exception
 * ring, the Rx WBM release ring, the REO destination (Rx) rings, the REO
 * status ring and the LMAC rings, all charged against a single shared
 * budget. Returns early (skipping the LRO flush) once the budget is spent.
 *
 * Return: number of entries actually processed (dp_budget - leftover)
 */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	/* Mark this context as actively servicing rings on this CPU */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		/* Skip completion rings whose WBM number is not in the
		 * tx mask of this interrupt context.
		 */
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			/* Budget is charged only when this ring yielded
			 * work; a zero result leaves budget untouched.
			 */
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the monitor vdev timer
	 * is not already polling them.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	dp_umac_reset_trigger_pre_reset_notify_cb(soc);

	return dp_budget - budget;
}
714 
715 #else /* QCA_HOST_MODE_WIFI_DISABLED */
716 
dp_service_srngs(void * dp_ctx,uint32_t dp_budget,int cpu)717 uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
718 {
719 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
720 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
721 	struct dp_soc *soc = int_ctx->soc;
722 	uint32_t remaining_quota = dp_budget;
723 	uint32_t work_done  = 0;
724 	int budget = dp_budget;
725 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
726 
727 	if (reo_status_mask) {
728 		if (dp_reo_status_ring_handler(int_ctx, soc))
729 			int_ctx->intr_stats.num_reo_status_ring_masks++;
730 	}
731 
732 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
733 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
734 		if (work_done) {
735 			budget -=  work_done;
736 			if (budget <= 0)
737 				goto budget_done;
738 			remaining_quota = budget;
739 		}
740 	}
741 
742 	qdf_lro_flush(int_ctx->lro_ctx);
743 	intr_stats->num_masks++;
744 
745 budget_done:
746 	return dp_budget - budget;
747 }
748 
749 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
750 
dp_soc_attach_poll(struct cdp_soc_t * txrx_soc)751 QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
752 {
753 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
754 	int i;
755 	int lmac_id = 0;
756 
757 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
758 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
759 	soc->intr_mode = DP_INTR_POLL;
760 
761 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
762 		soc->intr_ctx[i].dp_intr_id = i;
763 		soc->intr_ctx[i].tx_ring_mask =
764 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
765 		soc->intr_ctx[i].rx_ring_mask =
766 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
767 		soc->intr_ctx[i].rx_mon_ring_mask =
768 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
769 		soc->intr_ctx[i].rx_err_ring_mask =
770 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
771 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
772 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
773 		soc->intr_ctx[i].reo_status_ring_mask =
774 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
775 		soc->intr_ctx[i].rxdma2host_ring_mask =
776 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
777 		soc->intr_ctx[i].soc = soc;
778 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
779 
780 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
781 			hif_event_history_init(soc->hif_handle, i);
782 			soc->mon_intr_id_lmac_map[lmac_id] = i;
783 			lmac_id++;
784 		}
785 	}
786 
787 	qdf_timer_init(soc->osdev, &soc->int_timer,
788 		       dp_interrupt_timer, (void *)soc,
789 		       QDF_TIMER_TYPE_WAKE_APPS);
790 
791 	return QDF_STATUS_SUCCESS;
792 }
793 
794 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
795 /**
796  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
797  * @soc: DP soc handle
798  * @num_irq: IRQ number
799  * @irq_id_map: IRQ map
800  * @intr_id: interrupt context ID
801  *
802  * Return: 0 for success. nonzero for failure.
803  */
804 static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc * soc,int num_irq,int irq_id_map[],int intr_id)805 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
806 				  int irq_id_map[], int intr_id)
807 {
808 	return hif_register_ext_group(soc->hif_handle,
809 				      num_irq, irq_id_map,
810 				      dp_service_near_full_srngs,
811 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
812 				      HIF_EXEC_NAPI_TYPE,
813 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
814 }
815 #else
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	/* Near-full IRQ feature compiled out: nothing to register */
	return 0;
}
822 #endif
823 
824 #ifdef DP_CON_MON_MSI_SKIP_SET
dp_skip_rx_mon_ring_mask_set(struct dp_soc * soc)825 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
826 {
827 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
828 		 QDF_GLOBAL_MONITOR_MODE &&
829 		 !dp_mon_mode_local_pkt_capture(soc));
830 }
831 #else
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	/* MSI-skip handling compiled out: never skip the mon ring mask */
	return false;
}
836 #endif
837 
dp_soc_interrupt_detach(struct cdp_soc_t * txrx_soc)838 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
839 {
840 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
841 	int i;
842 
843 	if (soc->intr_mode == DP_INTR_POLL) {
844 		qdf_timer_free(&soc->int_timer);
845 	} else {
846 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
847 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
848 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
849 	}
850 
851 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
852 		soc->intr_ctx[i].tx_ring_mask = 0;
853 		soc->intr_ctx[i].rx_ring_mask = 0;
854 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
855 		soc->intr_ctx[i].rx_err_ring_mask = 0;
856 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
857 		soc->intr_ctx[i].reo_status_ring_mask = 0;
858 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
859 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
860 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
861 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
862 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
863 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
864 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
865 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
866 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
867 
868 		hif_event_history_deinit(soc->hif_handle, i);
869 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
870 	}
871 
872 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
873 		    sizeof(soc->mon_intr_id_lmac_map),
874 		    DP_MON_INVALID_LMAC_ID);
875 }
876 
dp_soc_interrupt_attach(struct cdp_soc_t * txrx_soc)877 QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
878 {
879 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
880 
881 	int i = 0;
882 	int num_irq = 0;
883 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
884 	int lmac_id = 0;
885 	int napi_scale;
886 
887 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
888 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
889 
890 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
891 		int ret = 0;
892 
893 		/* Map of IRQ ids registered with one interrupt context */
894 		int irq_id_map[HIF_MAX_GRP_IRQ];
895 
896 		int tx_mask =
897 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
898 		int rx_mask =
899 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
900 		int rx_mon_mask =
901 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
902 		int tx_mon_ring_mask =
903 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
904 		int rx_err_ring_mask =
905 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
906 		int rx_wbm_rel_ring_mask =
907 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
908 		int reo_status_ring_mask =
909 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
910 		int rxdma2host_ring_mask =
911 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
912 		int host2rxdma_ring_mask =
913 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
914 		int host2rxdma_mon_ring_mask =
915 			wlan_cfg_get_host2rxdma_mon_ring_mask(
916 				soc->wlan_cfg_ctx, i);
917 		int rx_near_full_grp_1_mask =
918 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
919 							     i);
920 		int rx_near_full_grp_2_mask =
921 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
922 							     i);
923 		int tx_ring_near_full_mask =
924 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
925 							    i);
926 		int host2txmon_ring_mask =
927 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
928 		int umac_reset_intr_mask =
929 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
930 
931 		if (dp_skip_rx_mon_ring_mask_set(soc))
932 			rx_mon_mask = 0;
933 
934 		soc->intr_ctx[i].dp_intr_id = i;
935 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
936 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
937 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
938 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
939 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
940 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
941 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
942 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
943 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
944 			 host2rxdma_mon_ring_mask;
945 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
946 						rx_near_full_grp_1_mask;
947 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
948 						rx_near_full_grp_2_mask;
949 		soc->intr_ctx[i].tx_ring_near_full_mask =
950 						tx_ring_near_full_mask;
951 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
952 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
953 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
954 
955 		soc->intr_ctx[i].soc = soc;
956 
957 		num_irq = 0;
958 
959 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
960 					       &num_irq);
961 
962 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
963 		    tx_ring_near_full_mask) {
964 			dp_soc_near_full_interrupt_attach(soc, num_irq,
965 							  irq_id_map, i);
966 		} else {
967 			napi_scale = wlan_cfg_get_napi_scale_factor(
968 							    soc->wlan_cfg_ctx);
969 			if (!napi_scale)
970 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
971 
972 			ret = hif_register_ext_group(soc->hif_handle,
973 				num_irq, irq_id_map, dp_service_srngs_wrapper,
974 				&soc->intr_ctx[i], "dp_intr",
975 				HIF_EXEC_NAPI_TYPE, napi_scale);
976 		}
977 
978 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
979 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
980 
981 		if (ret) {
982 			dp_init_err("%pK: failed, ret = %d", soc, ret);
983 			dp_soc_interrupt_detach(txrx_soc);
984 			return QDF_STATUS_E_FAILURE;
985 		}
986 
987 		hif_event_history_init(soc->hif_handle, i);
988 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
989 
990 		if (rx_err_ring_mask)
991 			rx_err_ring_intr_ctxt_id = i;
992 
993 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
994 			soc->mon_intr_id_lmac_map[lmac_id] = i;
995 			lmac_id++;
996 		}
997 	}
998 
999 	hif_configure_ext_group_interrupts(soc->hif_handle);
1000 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
1001 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
1002 						  rx_err_ring_intr_ctxt_id, 0);
1003 
1004 	return QDF_STATUS_SUCCESS;
1005 }
1006 
1007 #define AVG_MAX_MPDUS_PER_TID 128
1008 #define AVG_TIDS_PER_CLIENT 2
1009 #define AVG_FLOWS_PER_TID 2
1010 #define AVG_MSDUS_PER_FLOW 128
1011 #define AVG_MSDUS_PER_MPDU 4
1012 
/**
 * dp_hw_link_desc_pool_banks_free() - Release HW link descriptor banks
 * @soc: DP SOC handle
 * @mac_id: pdev/mac id for monitor-mode banks, or WLAN_INVALID_PDEV_ID
 *          for the common SOC link descriptor pool
 *
 * Removes the minidump entry covering the bank memory and frees the
 * multi-page allocation backing it. Does nothing if no pages were
 * allocated.
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
{
	struct qdf_mem_multi_page_t *desc_pages;

	/* Monitor banks live in the monitor context; the common pool
	 * hangs off the SOC itself.
	 */
	desc_pages = (mac_id == WLAN_INVALID_PDEV_ID) ?
			&soc->link_desc_pages :
			dp_monitor_get_link_desc_pages(soc, mac_id);

	if (!desc_pages) {
		dp_err("can not get link desc pages");
		QDF_ASSERT(0);
		return;
	}

	if (!desc_pages->dma_pages)
		return;

	wlan_minidump_remove((void *)
			     desc_pages->dma_pages->page_v_addr_start,
			     desc_pages->num_pages * desc_pages->page_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			     "hw_link_desc_bank");
	dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
				     desc_pages, 0, false);
}
1040 
1041 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
1042 
/**
 * dp_hw_link_desc_pool_banks_alloc() - Allocate HW link descriptor banks
 * @soc: DP SOC handle
 * @mac_id: pdev/mac id for monitor-mode banks, or WLAN_INVALID_PDEV_ID
 *          for the common SOC link descriptor pool
 *
 * Computes the number of link descriptors needed (from configured
 * client/TID/MSDU averages for the common pool, or from the monitor
 * descriptor ring size for monitor mode), rounds it up to a power of
 * two, and backs it with a multi-page DMA allocation. Idempotent: if
 * pages are already allocated it returns success immediately.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAULT on failure.
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	/* NOTE(review): minidump_str is filled below but wlan_minidump_log()
	 * at the end uses the literal "hw_link_desc_bank"; presumably the
	 * buffer was meant to be passed instead — confirm against callers.
	 */
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		/* Monitor mode: one link desc per monitor desc ring entry */
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		/* Common pool: size from average traffic-shape constants */
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		/* NOTE(review): magic divisor 6 — looks like a hard-coded
		 * MSDUs-per-link-desc; confirm whether it should track
		 * num_msdus_per_link_desc from the HAL instead.
		 */
		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);
	total_mem_size =  *total_link_descs * link_desc_size;
	/* Extra headroom so the bank can be aligned inside the allocation */
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}
1143 
dp_hw_link_desc_ring_free(struct dp_soc * soc)1144 void dp_hw_link_desc_ring_free(struct dp_soc *soc)
1145 {
1146 	uint32_t i;
1147 	uint32_t size = soc->wbm_idle_scatter_buf_size;
1148 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
1149 	qdf_dma_addr_t paddr;
1150 
1151 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
1152 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1153 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
1154 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
1155 			if (vaddr) {
1156 				qdf_mem_free_consistent(soc->osdev,
1157 							soc->osdev->dev,
1158 							size,
1159 							vaddr,
1160 							paddr,
1161 							0);
1162 				vaddr = NULL;
1163 			}
1164 		}
1165 	} else {
1166 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
1167 				     soc->wbm_idle_link_ring.alloc_size,
1168 				     soc->ctrl_psoc,
1169 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1170 				     "wbm_idle_link_ring");
1171 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
1172 	}
1173 }
1174 
dp_hw_link_desc_ring_alloc(struct dp_soc * soc)1175 QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
1176 {
1177 	uint32_t entry_size, i;
1178 	uint32_t total_mem_size;
1179 	qdf_dma_addr_t *baseaddr = NULL;
1180 	struct dp_srng *dp_srng;
1181 	uint32_t ring_type;
1182 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1183 	uint32_t tlds;
1184 
1185 	ring_type = WBM_IDLE_LINK;
1186 	dp_srng = &soc->wbm_idle_link_ring;
1187 	tlds = soc->total_link_descs;
1188 
1189 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
1190 	total_mem_size = entry_size * tlds;
1191 
1192 	if (total_mem_size <= max_alloc_size) {
1193 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
1194 			dp_init_err("%pK: Link desc idle ring setup failed",
1195 				    soc);
1196 			goto fail;
1197 		}
1198 
1199 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
1200 				  soc->wbm_idle_link_ring.alloc_size,
1201 				  soc->ctrl_psoc,
1202 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
1203 				  "wbm_idle_link_ring");
1204 	} else {
1205 		uint32_t num_scatter_bufs;
1206 		uint32_t buf_size = 0;
1207 
1208 		soc->wbm_idle_scatter_buf_size =
1209 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1210 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1211 					soc->hal_soc, total_mem_size,
1212 					soc->wbm_idle_scatter_buf_size);
1213 
1214 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1215 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1216 				  FL("scatter bufs size out of bounds"));
1217 			goto fail;
1218 		}
1219 
1220 		for (i = 0; i < num_scatter_bufs; i++) {
1221 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
1222 			buf_size = soc->wbm_idle_scatter_buf_size;
1223 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1224 				qdf_mem_alloc_consistent(soc->osdev,
1225 							 soc->osdev->dev,
1226 							 buf_size,
1227 							 baseaddr);
1228 
1229 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1230 				QDF_TRACE(QDF_MODULE_ID_DP,
1231 					  QDF_TRACE_LEVEL_ERROR,
1232 					  FL("Scatter lst memory alloc fail"));
1233 				goto fail;
1234 			}
1235 		}
1236 		soc->num_scatter_bufs = num_scatter_bufs;
1237 	}
1238 	return QDF_STATUS_SUCCESS;
1239 
1240 fail:
1241 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1242 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
1243 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
1244 
1245 		if (vaddr) {
1246 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1247 						soc->wbm_idle_scatter_buf_size,
1248 						vaddr,
1249 						paddr, 0);
1250 			vaddr = NULL;
1251 		}
1252 	}
1253 	return QDF_STATUS_E_NOMEM;
1254 }
1255 
1256 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
1257 
dp_hw_link_desc_ring_init(struct dp_soc * soc)1258 QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
1259 {
1260 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
1261 
1262 	if (dp_srng->base_vaddr_unaligned) {
1263 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
1264 			return QDF_STATUS_E_FAILURE;
1265 	}
1266 	return QDF_STATUS_SUCCESS;
1267 }
1268 
dp_hw_link_desc_ring_deinit(struct dp_soc * soc)1269 void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
1270 {
1271 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
1272 }
1273 
1274 #ifdef IPA_OFFLOAD
1275 #define USE_1_IPA_RX_REO_RING 1
1276 #define USE_2_IPA_RX_REO_RINGS 2
1277 #define REO_DST_RING_SIZE_QCA6290 1023
1278 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
1279 #define REO_DST_RING_SIZE_QCA8074 1023
1280 #define REO_DST_RING_SIZE_QCN9000 2048
1281 #else
1282 #define REO_DST_RING_SIZE_QCA8074 8
1283 #define REO_DST_RING_SIZE_QCN9000 8
1284 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
1285 
1286 #ifdef IPA_WDI3_TX_TWO_PIPES
1287 #ifdef DP_MEMORY_OPT
/* IPA_WDI3_TX_TWO_PIPES + DP_MEMORY_OPT: the alternate IPA TX ring pair
 * is allocated/initialized on demand rather than at SOC attach.
 */

/* Initialize the TCL/WBM ring pair used by the IPA alternate TX pipe */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* De-initialize the IPA alternate TX ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Allocate memory for the IPA alternate TX ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Free memory of the IPA alternate TX ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
1307 
#else /* !DP_MEMORY_OPT */
/* Without DP_MEMORY_OPT the alternate TX rings are managed with the
 * regular rings, so these hooks are no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
#endif /* DP_MEMORY_OPT */
1327 
/* Program HW state for the IPA alternate TCL data ring (two-pipe TX) */
void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
1333 
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single-pipe IPA TX: no alternate ring exists, all hooks are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}

#endif /* IPA_WDI3_TX_TWO_PIPES */
1358 
1359 #else
1360 
1361 #define REO_DST_RING_SIZE_QCA6290 1024
1362 
/* IPA offload disabled: all alternate TX ring helpers are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
1384 
1385 #endif /* IPA_OFFLOAD */
1386 
1387 /**
1388  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
1389  * @soc: Datapath soc handler
1390  *
1391  * This api resets the default cpu ring map
1392  */
dp_soc_reset_cpu_ring_map(struct dp_soc * soc)1393 void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1394 {
1395 	uint8_t i;
1396 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1397 
1398 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1399 		switch (nss_config) {
1400 		case dp_nss_cfg_first_radio:
1401 			/*
1402 			 * Setting Tx ring map for one nss offloaded radio
1403 			 */
1404 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1405 			break;
1406 
1407 		case dp_nss_cfg_second_radio:
1408 			/*
1409 			 * Setting Tx ring for two nss offloaded radios
1410 			 */
1411 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1412 			break;
1413 
1414 		case dp_nss_cfg_dbdc:
1415 			/*
1416 			 * Setting Tx ring map for 2 nss offloaded radios
1417 			 */
1418 			soc->tx_ring_map[i] =
1419 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
1420 			break;
1421 
1422 		case dp_nss_cfg_dbtc:
1423 			/*
1424 			 * Setting Tx ring map for 3 nss offloaded radios
1425 			 */
1426 			soc->tx_ring_map[i] =
1427 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
1428 			break;
1429 
1430 		default:
1431 			dp_err("tx_ring_map failed due to invalid nss cfg");
1432 			break;
1433 		}
1434 	}
1435 }
1436 
1437 /**
1438  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
1439  *					  unused WMAC hw rings
1440  * @soc: DP Soc handle
1441  * @mac_num: wmac num
1442  *
1443  * Return: Return void
1444  */
dp_soc_disable_unused_mac_intr_mask(struct dp_soc * soc,int mac_num)1445 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
1446 						int mac_num)
1447 {
1448 	uint8_t *grp_mask = NULL;
1449 	int group_number;
1450 
1451 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1452 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1453 	if (group_number < 0)
1454 		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_BUF, mac_num %d",
1455 			      soc, mac_num);
1456 	else
1457 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1458 						  group_number, 0x0);
1459 
1460 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1461 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1462 	if (group_number < 0)
1463 		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_DST, mac_num %d",
1464 			      soc, mac_num);
1465 	else
1466 		wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
1467 					      group_number, 0x0);
1468 
1469 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1470 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1471 	if (group_number < 0)
1472 		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_DST, mac_num %d",
1473 			      soc, mac_num);
1474 	else
1475 		wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
1476 						  group_number, 0x0);
1477 
1478 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1479 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
1480 	if (group_number < 0)
1481 		dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_BUF, mac_num %d",
1482 			      soc, mac_num);
1483 	else
1484 		wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
1485 						      group_number, 0x0);
1486 }
1487 
1488 #ifdef IPA_OFFLOAD
1489 #ifdef IPA_WDI3_VLAN_SUPPORT
1490 /**
1491  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
1492  *                                     ring for vlan tagged traffic
1493  * @soc: DP Soc handle
1494  *
1495  * Return: Return void
1496  */
dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc * soc)1497 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
1498 {
1499 	uint8_t *grp_mask = NULL;
1500 	int group_number, mask;
1501 
1502 	if (!wlan_ipa_is_vlan_enabled())
1503 		return;
1504 
1505 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1506 
1507 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
1508 	if (group_number < 0) {
1509 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1510 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
1511 		return;
1512 	}
1513 
1514 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1515 
1516 	/* reset the interrupt mask for offloaded ring */
1517 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
1518 
1519 	/*
1520 	 * set the interrupt mask to zero for rx offloaded radio.
1521 	 */
1522 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1523 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_WDI3_VLAN_SUPPORT */
#else
/* IPA_OFFLOAD disabled: nothing to reset */
inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
#endif /* IPA_OFFLOAD */
1534 
1535 /**
1536  * dp_soc_reset_intr_mask() - reset interrupt mask
1537  * @soc: DP Soc handle
1538  *
1539  * Return: Return void
1540  */
dp_soc_reset_intr_mask(struct dp_soc * soc)1541 void dp_soc_reset_intr_mask(struct dp_soc *soc)
1542 {
1543 	uint8_t j;
1544 	uint8_t *grp_mask = NULL;
1545 	int group_number, mask, num_ring;
1546 
1547 	/* number of tx ring */
1548 	num_ring = soc->num_tcl_data_rings;
1549 
1550 	/*
1551 	 * group mask for tx completion  ring.
1552 	 */
1553 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1554 
1555 	/* loop and reset the mask for only offloaded ring */
1556 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
1557 		/*
1558 		 * Group number corresponding to tx offloaded ring.
1559 		 */
1560 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1561 		if (group_number < 0) {
1562 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1563 				      soc, WBM2SW_RELEASE, j);
1564 			continue;
1565 		}
1566 
1567 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1568 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
1569 		    (!mask)) {
1570 			continue;
1571 		}
1572 
1573 		/* reset the tx mask for offloaded ring */
1574 		mask &= (~(1 << j));
1575 
1576 		/*
1577 		 * reset the interrupt mask for offloaded ring.
1578 		 */
1579 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1580 	}
1581 
1582 	/* number of rx rings */
1583 	num_ring = soc->num_reo_dest_rings;
1584 
1585 	/*
1586 	 * group mask for reo destination ring.
1587 	 */
1588 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1589 
1590 	/* loop and reset the mask for only offloaded ring */
1591 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
1592 		/*
1593 		 * Group number corresponding to rx offloaded ring.
1594 		 */
1595 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1596 		if (group_number < 0) {
1597 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1598 				      soc, REO_DST, j);
1599 			continue;
1600 		}
1601 
1602 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1603 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
1604 		    (!mask)) {
1605 			continue;
1606 		}
1607 
1608 		/* reset the interrupt mask for offloaded ring */
1609 		mask &= (~(1 << j));
1610 
1611 		/*
1612 		 * set the interrupt mask to zero for rx offloaded radio.
1613 		 */
1614 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1615 	}
1616 
1617 	/*
1618 	 * group mask for Rx buffer refill ring
1619 	 */
1620 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1621 
1622 	/* loop and reset the mask for only offloaded ring */
1623 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1624 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1625 
1626 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1627 			continue;
1628 		}
1629 
1630 		/*
1631 		 * Group number corresponding to rx offloaded ring.
1632 		 */
1633 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
1634 		if (group_number < 0) {
1635 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1636 				      soc, REO_DST, lmac_id);
1637 			continue;
1638 		}
1639 
1640 		/* set the interrupt mask for offloaded ring */
1641 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1642 							  group_number);
1643 		mask &= (~(1 << lmac_id));
1644 
1645 		/*
1646 		 * set the interrupt mask to zero for rx offloaded radio.
1647 		 */
1648 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1649 						  group_number, mask);
1650 	}
1651 
1652 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1653 
1654 	for (j = 0; j < num_ring; j++) {
1655 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
1656 			continue;
1657 		}
1658 
1659 		/*
1660 		 * Group number corresponding to rx err ring.
1661 		 */
1662 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1663 		if (group_number < 0) {
1664 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1665 				      soc, REO_EXCEPTION, j);
1666 			continue;
1667 		}
1668 
1669 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
1670 					      group_number, 0);
1671 	}
1672 }
1673 
1674 #ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - Compute REO destination remap register values
 *                         (IPA offload build)
 * @soc: DP SOC handle
 * @remap0: output for REO remap IX0 register value
 * @remap1: output for REO remap IX2 register value
 * @remap2: output for REO remap IX3 register value
 *
 * Distributes RX traffic across the host-owned REO2SW rings, excluding
 * the ring(s) reserved for IPA (two rings when VLAN offload is enabled
 * on LI, otherwise one; BE always reserves two).
 *
 * NOTE(review): on an unknown arch_id the switch only logs/asserts and
 * *remap0/1/2 are never written, yet dp_debug below still reads them
 * and the function returns true — confirm callers initialize them.
 *
 * Return: true (remap values computed)
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2)
{
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_2_IPA_RX_REO_RINGS, remap1,
					      remap2);
		break;

	case CDP_ARCH_TYPE_LI:
		if (wlan_ipa_is_vlan_enabled()) {
			/* VLAN offload claims a second REO ring for IPA */
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_2_IPA_RX_REO_RINGS, remap1,
					remap2);

		} else {
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_1_IPA_RX_REO_RING, remap1,
					remap2);
		}

		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
		break;
	default:
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);
	}

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
1717 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* True if @index is the TCL ring reserved for the IPA alternate TX pipe */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True if @index is the completion ring of the IPA alternate TX pipe */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single-pipe build: no ring is ever the alternate TX ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
1739 
1740 /**
1741  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
1742  *
1743  * @tx_ring_num: Tx ring number
1744  * @tx_ipa_ring_sz: Return param only updated for IPA.
1745  * @soc_cfg_ctx: dp soc cfg context
1746  *
1747  * Return: None
1748  */
dp_ipa_get_tx_ring_size(int tx_ring_num,int * tx_ipa_ring_sz,struct wlan_cfg_dp_soc_ctxt * soc_cfg_ctx)1749 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
1750 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1751 {
1752 	if (!soc_cfg_ctx->ipa_enabled)
1753 		return;
1754 
1755 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
1756 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
1757 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
1758 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
1759 }
1760 
1761 /**
1762  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
1763  *
1764  * @tx_comp_ring_num: Tx comp ring number
1765  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
1766  * @soc_cfg_ctx: dp soc cfg context
1767  *
1768  * Return: None
1769  */
dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,int * tx_comp_ipa_ring_sz,struct wlan_cfg_dp_soc_ctxt * soc_cfg_ctx)1770 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
1771 					 int *tx_comp_ipa_ring_sz,
1772 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
1773 {
1774 	if (!soc_cfg_ctx->ipa_enabled)
1775 		return;
1776 
1777 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
1778 		*tx_comp_ipa_ring_sz =
1779 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
1780 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
1781 		*tx_comp_ipa_ring_sz =
1782 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
1783 }
1784 #else
dp_reo_ring_selection(uint32_t value,uint32_t * ring)1785 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
1786 {
1787 	uint8_t num = 0;
1788 
1789 	switch (value) {
1790 	/* should we have all the different possible ring configs */
1791 	case 0xFF:
1792 		num = 8;
1793 		ring[0] = REO_REMAP_SW1;
1794 		ring[1] = REO_REMAP_SW2;
1795 		ring[2] = REO_REMAP_SW3;
1796 		ring[3] = REO_REMAP_SW4;
1797 		ring[4] = REO_REMAP_SW5;
1798 		ring[5] = REO_REMAP_SW6;
1799 		ring[6] = REO_REMAP_SW7;
1800 		ring[7] = REO_REMAP_SW8;
1801 		break;
1802 
1803 	case 0x3F:
1804 		num = 6;
1805 		ring[0] = REO_REMAP_SW1;
1806 		ring[1] = REO_REMAP_SW2;
1807 		ring[2] = REO_REMAP_SW3;
1808 		ring[3] = REO_REMAP_SW4;
1809 		ring[4] = REO_REMAP_SW5;
1810 		ring[5] = REO_REMAP_SW6;
1811 		break;
1812 
1813 	case 0xF:
1814 		num = 4;
1815 		ring[0] = REO_REMAP_SW1;
1816 		ring[1] = REO_REMAP_SW2;
1817 		ring[2] = REO_REMAP_SW3;
1818 		ring[3] = REO_REMAP_SW4;
1819 		break;
1820 	case 0xE:
1821 		num = 3;
1822 		ring[0] = REO_REMAP_SW2;
1823 		ring[1] = REO_REMAP_SW3;
1824 		ring[2] = REO_REMAP_SW4;
1825 		break;
1826 	case 0xD:
1827 		num = 3;
1828 		ring[0] = REO_REMAP_SW1;
1829 		ring[1] = REO_REMAP_SW3;
1830 		ring[2] = REO_REMAP_SW4;
1831 		break;
1832 	case 0xC:
1833 		num = 2;
1834 		ring[0] = REO_REMAP_SW3;
1835 		ring[1] = REO_REMAP_SW4;
1836 		break;
1837 	case 0xB:
1838 		num = 3;
1839 		ring[0] = REO_REMAP_SW1;
1840 		ring[1] = REO_REMAP_SW2;
1841 		ring[2] = REO_REMAP_SW4;
1842 		break;
1843 	case 0xA:
1844 		num = 2;
1845 		ring[0] = REO_REMAP_SW2;
1846 		ring[1] = REO_REMAP_SW4;
1847 		break;
1848 	case 0x9:
1849 		num = 2;
1850 		ring[0] = REO_REMAP_SW1;
1851 		ring[1] = REO_REMAP_SW4;
1852 		break;
1853 	case 0x8:
1854 		num = 1;
1855 		ring[0] = REO_REMAP_SW4;
1856 		break;
1857 	case 0x7:
1858 		num = 3;
1859 		ring[0] = REO_REMAP_SW1;
1860 		ring[1] = REO_REMAP_SW2;
1861 		ring[2] = REO_REMAP_SW3;
1862 		break;
1863 	case 0x6:
1864 		num = 2;
1865 		ring[0] = REO_REMAP_SW2;
1866 		ring[1] = REO_REMAP_SW3;
1867 		break;
1868 	case 0x5:
1869 		num = 2;
1870 		ring[0] = REO_REMAP_SW1;
1871 		ring[1] = REO_REMAP_SW3;
1872 		break;
1873 	case 0x4:
1874 		num = 1;
1875 		ring[0] = REO_REMAP_SW3;
1876 		break;
1877 	case 0x3:
1878 		num = 2;
1879 		ring[0] = REO_REMAP_SW1;
1880 		ring[1] = REO_REMAP_SW2;
1881 		break;
1882 	case 0x2:
1883 		num = 1;
1884 		ring[0] = REO_REMAP_SW2;
1885 		break;
1886 	case 0x1:
1887 		num = 1;
1888 		ring[0] = REO_REMAP_SW1;
1889 		break;
1890 	default:
1891 		dp_err("unknown reo ring map 0x%x", value);
1892 		QDF_BUG(0);
1893 	}
1894 	return num;
1895 }
1896 
/**
 * dp_reo_remap_config() - Compute REO destination remap register values
 *                         (non-IPA build)
 * @soc: DP SOC handle
 * @remap0: output for REO remap IX0 register value
 * @remap1: output for REO remap IX2 register value
 * @remap2: output for REO remap IX3 register value
 *
 * Selects the set of host-owned REO2SW rings based on the NSS offload
 * configuration and the configured ring bitmap, then derives the remap
 * register values.
 *
 * NOTE(review): the switch has no default case — for an unexpected
 * offload_radio value, num/value stay unset, the remap registers are
 * never computed, yet the function still returns true and dp_debug
 * reads *remap1/*remap2. Confirm offload_radio is always one of the
 * handled enum values.
 *
 * Return: false when all radios are NSS-offloaded (host owns no REO
 * rings), true otherwise.
 */
bool dp_reo_remap_config(struct dp_soc *soc,
			 uint32_t *remap0,
			 uint32_t *remap1,
			 uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
	uint8_t num;
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
	uint32_t value;

	switch (offload_radio) {
	case dp_nss_cfg_default:
		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);
		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);

		break;
	case dp_nss_cfg_first_radio:
		/* Exclude ring 1 (bit 0), owned by the offloaded radio */
		value = reo_config & 0xE;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_second_radio:
		/* Exclude ring 2 (bit 1), owned by the offloaded radio */
		value = reo_config & 0xD;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;
	}

	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
1941 
/* No-op stub for builds without IPA_OFFLOAD: leaves *tx_ipa_ring_sz
 * untouched so the caller's configured TX ring size is used as-is.
 */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
1946 
/* No-op stub for builds without IPA_OFFLOAD: leaves *tx_comp_ipa_ring_sz
 * untouched so the caller's configured TX completion ring size is used.
 */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
1952 #endif /* IPA_OFFLOAD */
1953 
1954 /**
1955  * dp_reo_frag_dst_set() - configure reo register to set the
1956  *                        fragment destination ring
1957  * @soc: Datapath soc
1958  * @frag_dst_ring: output parameter to set fragment destination ring
1959  *
1960  * Based on offload_radio below fragment destination rings is selected
1961  * 0 - TCL
1962  * 1 - SW1
1963  * 2 - SW2
1964  * 3 - SW3
1965  * 4 - SW4
1966  * 5 - Release
1967  * 6 - FW
1968  * 7 - alternate select
1969  *
1970  * Return: void
1971  */
void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
{
	uint8_t nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	if (nss_cfg == dp_nss_cfg_default) {
		/* host handles rx: send fragments back to TCL for reinjection */
		*frag_dst_ring = REO_REMAP_TCL;
	} else if (nss_cfg == dp_nss_cfg_first_radio ||
		   nss_cfg == dp_nss_cfg_dbdc ||
		   nss_cfg == dp_nss_cfg_dbtc) {
		/*
		 * Single-band NSS offload, or both/all bands offloaded:
		 * let HW pick the destination via alternate select.
		 */
		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
	} else {
		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
	}
}
1994 
1995 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the completion event waited on by ext stats requests
 * (WLAN_FEATURE_STATS_EXT builds only).
 */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
2000 #else
/* No-op stub when WLAN_FEATURE_STATS_EXT is not compiled in */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
2004 #endif
2005 
/**
 * dp_deinit_tx_pair_by_index() - Deinit the TCL data / WBM completion
 * ring pair for the given index: unregister SSR dump and minidump
 * entries, then deinit the SRNGs (reverse of dp_init_tx_ring_pair_by_index).
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	dp_ssr_dump_srng_unregister("tcl_data_ring", index);
	dp_ssr_dump_srng_unregister("tx_comp_ring", index);

	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* No HW completion ring mapped for this index: nothing more to do */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
2043 
2044 /**
2045  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
2046  * ring pair
2047  * @soc: DP soc pointer
2048  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2049  *
2050  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2051  */
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index)
{
	int tcl_ring_num, wbm_ring_num;
	uint8_t bm_id;

	if (index >= MAX_TCL_DATA_RINGS) {
		dp_err("unexpected index!");
		QDF_BUG(0);
		goto fail1;
	}

	/* Map the logical index to the HW TCL / WBM ring numbers */
	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		goto fail1;
	}

	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
			 tcl_ring_num, 0)) {
		dp_err("dp_srng_init failed for tcl_data_ring");
		goto fail1;
	}
	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
			  soc->tcl_data_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_DATA,
			  "tcl_data_ring");

	/*
	 * No completion ring mapped for this index: skip WBM init and
	 * SSR/minidump registration, but still program the return BM id.
	 */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		goto set_rbm;

	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
			 wbm_ring_num, 0)) {
		dp_err("dp_srng_init failed for tx_comp_ring");
		goto fail1;
	}

	dp_ssr_dump_srng_register("tcl_data_ring",
				  &soc->tcl_data_ring[index], index);
	dp_ssr_dump_srng_register("tx_comp_ring",
				  &soc->tx_comp_ring[index], index);

	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
			  soc->tx_comp_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TX_COMP,
			  "tcl_comp_ring");
set_rbm:
	/* Program the return buffer manager used by TX for this TCL ring */
	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);

	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);

	return QDF_STATUS_SUCCESS;

fail1:
	/* Partially-initialized rings are cleaned up by the caller's
	 * deinit path; nothing to unwind here.
	 */
	return QDF_STATUS_E_FAILURE;
}
2115 
/* Free the memory backing the TCL data / TX completion ring pair for
 * "index" (reverse of dp_alloc_tx_ring_pair_by_index). dp_srng_free()
 * is safe on rings that were never allocated.
 */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
2122 
2123 /**
2124  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
2125  * ring pair for the given "index"
2126  * @soc: DP soc pointer
2127  * @index: index of soc->tcl_data or soc->tx_comp to initialize
2128  *
2129  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
2130  */
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index)
{
	int tx_ring_size;
	int tx_comp_ring_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
	int cached = 0;

	if (index >= MAX_TCL_DATA_RINGS) {
		dp_err("unexpected index!");
		QDF_BUG(0);
		goto fail1;
	}

	dp_debug("index %u", index);
	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
	/* IPA builds may override the size; no-op otherwise */
	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);

	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
			  tx_ring_size, cached)) {
		dp_err("dp_srng_alloc failed for tcl_data_ring");
		goto fail1;
	}

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
	/* Use cached descriptors for the TX completion (WBM2SW) ring
	 * below when NSS offload is disabled
	 */
	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = WLAN_CFG_DST_RING_CACHED_DESC;

	/* No completion ring mapped for this index: tcl ring alone suffices */
	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
	    INVALID_WBM_RING_NUM)
		return QDF_STATUS_SUCCESS;

	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
			  tx_comp_ring_size, cached)) {
		dp_err("dp_srng_alloc failed for tx_comp_ring");
		goto fail1;
	}

	return QDF_STATUS_SUCCESS;

fail1:
	/* Caller frees any ring already allocated via
	 * dp_free_tx_ring_pair_by_index()
	 */
	return QDF_STATUS_E_FAILURE;
}
2176 
2177 /**
2178  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
2179  * @pdev: DP_PDEV handle
2180  *
2181  * Return: void
2182  */
2183 void
dp_dscp_tid_map_setup(struct dp_pdev * pdev)2184 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2185 {
2186 	uint8_t map_id;
2187 	struct dp_soc *soc = pdev->soc;
2188 
2189 	if (!soc)
2190 		return;
2191 
2192 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2193 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
2194 			     default_dscp_tid_map,
2195 			     sizeof(default_dscp_tid_map));
2196 	}
2197 
2198 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2199 		hal_tx_set_dscp_tid_map(soc->hal_soc,
2200 					default_dscp_tid_map,
2201 					map_id);
2202 	}
2203 }
2204 
2205 /**
2206  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
2207  * @pdev: DP_PDEV handle
2208  *
2209  * Return: void
2210  */
void
dp_pcp_tid_map_setup(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc)
		return;

	/* Mirror the default PCP->TID table into the SW copy, then
	 * program it as the HW default map.
	 */
	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
		     sizeof(default_pcp_tid_map));
	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
}
2223 
#ifndef DP_UMAC_HW_RESET_SUPPORT
static inline
#endif
/* Drain the REO descriptor freelist: unmap and free each queued HW
 * queue descriptor, then destroy the list and its lock. Exported (non
 * static) when UMAC HW reset support needs to call it externally.
 */
void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* DMA-unmap before freeing the backing memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
2247 
2248 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2249 /**
2250  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
2251  *                                          for deferred reo desc list
2252  * @soc: Datapath soc handle
2253  *
2254  * Return: void
2255  */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Mark the list usable only after lock and list exist */
	soc->reo_desc_deferred_freelist_init = true;
}
2263 
2264 /**
2265  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
2266  *                                           free the leftover REO QDESCs
2267  * @soc: Datapath soc handle
2268  *
2269  * Return: void
2270  */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* Flip the init flag under the lock so no new entries are queued
	 * while we drain
	 */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		/* DMA-unmap before freeing the backing memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
2291 #else
/* No-op stub when WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY is disabled */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}
2295 
/* No-op stub when WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY is disabled */
static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
2299 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
2300 
2301 /**
2302  * dp_soc_reset_txrx_ring_map() - reset tx ring map
2303  * @soc: DP SOC handle
2304  *
2305  */
dp_soc_reset_txrx_ring_map(struct dp_soc * soc)2306 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
2307 {
2308 	uint32_t i;
2309 
2310 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
2311 		soc->tx_ring_map[i] = 0;
2312 }
2313 
2314 /**
2315  * dp_soc_deinit() - Deinitialize txrx SOC
2316  * @txrx_soc: Opaque DP SOC handle
2317  *
2318  * Return: None
2319  */
void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct htt_soc *htt_soc = soc->htt_handle;

	dp_monitor_soc_deinit(soc);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		soc->arch_ops.txrx_peer_map_detach(soc);
		soc->peer_map_attach_success = FALSE;
	}

	/* Let in-flight HTT stats work finish, then block re-scheduling */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);
	dp_reo_desc_deferred_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_peer_mec_spinlock_destroy(soc);

	/* Drop HTT stats messages still queued for the (now stopped) work */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	qdf_nbuf_queue_free(&soc->invalid_buf_queue);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	qdf_spinlock_destroy(&soc->vdev_map_lock);

	/* Flush pending REO commands before destroying the cmd lock */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	dp_soc_print_inactive_objects(soc);
	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	/* Free wbm sg list and reset flags in down path */
	dp_rx_wbm_sg_list_deinit(soc);

	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
			     WLAN_MD_DP_SOC, "dp_soc");
}
2381 
2382 #ifdef QCA_HOST2FW_RXBUF_RING
/* Register the RXDMA error destination ring for this mac with the HTT
 * layer, if the ring was initialized (QCA_HOST2FW_RXBUF_RING builds).
 */
void
dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
				int lmac_id)
{
	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
		htt_srng_setup(soc->htt_handle, mac_id,
			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
			       RXDMA_DST);
}
2392 #endif
2393 
void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
				  enum cdp_host_reo_dest_ring *reo_dest,
				  bool *hash_based)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* Hash-based steering stays disabled for radios offloaded to NSS;
	 * otherwise take the value from the soc config.
	 */
	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);

	/* Fallback REO ring used when a toeplitz hash cannot be generated
	 * (e.g. non TCP/UDP frames).
	 */
	*reo_dest = pdev->reo_dest;
}
2416 
2417 #ifdef IPA_OFFLOAD
2418 /**
2419  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
2420  * @vdev: Virtual device
2421  *
2422  * Return: true if the vdev is of subtype P2P
2423  *	   false if the vdev is of any other subtype
2424  */
dp_is_vdev_subtype_p2p(struct dp_vdev * vdev)2425 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
2426 {
2427 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
2428 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
2429 	    vdev->subtype == wlan_op_subtype_p2p_go)
2430 		return true;
2431 
2432 	return false;
2433 }
2434 
2435 /**
2436  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2437  * @vdev: Datapath VDEV handle
2438  * @setup_info:
2439  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2440  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2441  * @lmac_peer_id_msb:
2442  *
2443  * If IPA is enabled in ini, for SAP mode, disable hash based
2444  * steering, use default reo_dst ring for RX. Use config values for other modes.
2445  *
2446  * Return: None
2447  */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       struct cdp_peer_setup_info *setup_info,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based,
				       uint8_t *lmac_peer_id_msb)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);

	/* P2P interfaces keep the default REO configuration even when
	 * IPA config is enabled.
	 */
	if (dp_is_vdev_subtype_p2p(vdev))
		return;

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return;

	/* Non-MDM platforms keep the default host REO ring */
	if (!dp_ipa_is_mdm_platform()) {
		dp_debug("opt_dp: default HOST reo ring is set");
		return;
	}

	/*
	 * IPA reaps reo_dest_ring_4: REO DST HW indexes are 0-3 while the
	 * cdp_host_reo_dest_ring enum is 1-4, hence the +1. For SAP also
	 * disable hash-based flow steering.
	 */
	*reo_dest = IPA_REO_DEST_RING_IDX + 1;
	if (vdev->opmode == wlan_op_mode_ap)
		*hash_based = 0;
}
2487 
2488 #else
2489 
2490 /**
2491  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
2492  * @vdev: Datapath VDEV handle
2493  * @setup_info:
2494  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
2495  * @hash_based: pointer to hash value (enabled/disabled) to be populated
2496  * @lmac_peer_id_msb:
2497  *
2498  * Use system config values for hash based steering.
2499  * Return: None
2500  */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       struct cdp_peer_setup_info *setup_info,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based,
				       uint8_t *lmac_peer_id_msb)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/* Non-IPA builds: delegate to the arch-specific implementation */
	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
					lmac_peer_id_msb);
}
2512 #endif /* IPA_OFFLOAD */
2513 #if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
2514 
2515 static inline uint8_t
dp_peer_get_local_link_id(struct dp_peer * peer,struct dp_txrx_peer * txrx_peer)2516 dp_peer_get_local_link_id(struct dp_peer *peer, struct dp_txrx_peer *txrx_peer)
2517 {
2518 	struct dp_local_link_id_peer_map *ll_id_peer_map =
2519 						&txrx_peer->ll_id_peer_map[0];
2520 	int i;
2521 
2522 	/*
2523 	 * Search for the peer entry in the
2524 	 * local_link_id to peer mac_addr mapping table
2525 	 */
2526 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2527 		if (ll_id_peer_map[i].in_use &&
2528 		    !qdf_mem_cmp(&peer->mac_addr.raw[0],
2529 				 &ll_id_peer_map[i].mac_addr.raw[0],
2530 				 QDF_MAC_ADDR_SIZE))
2531 			return ll_id_peer_map[i].local_link_id + 1;
2532 	}
2533 
2534 	/*
2535 	 * Create new entry for peer in the
2536 	 * local_link_id to peer mac_addr mapping table
2537 	 */
2538 	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2539 		if (ll_id_peer_map[i].in_use)
2540 			continue;
2541 
2542 		ll_id_peer_map[i].in_use = 1;
2543 		ll_id_peer_map[i].local_link_id = i;
2544 		qdf_mem_copy(&ll_id_peer_map[i].mac_addr.raw[0],
2545 			     &peer->mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
2546 		return ll_id_peer_map[i].local_link_id + 1;
2547 	}
2548 
2549 	/* We should not hit this case..!! Assert ?? */
2550 	return 0;
2551 }
2552 
2553 /**
2554  *  dp_peer_set_local_link_id() - Set local link id
2555  *  @peer: dp peer handle
2556  *
2557  *  Return: None
2558  */
static inline void
dp_peer_set_local_link_id(struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;

	/* Only MLO link peers carry a local link id */
	if (!IS_MLO_DP_LINK_PEER(peer))
		return;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer)
		peer->local_link_id = dp_peer_get_local_link_id(peer,
								txrx_peer);

	dp_info("Peer " QDF_MAC_ADDR_FMT " txrx_peer %pK local_link_id %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), txrx_peer,
		peer->local_link_id);
}
2576 #else
/* No-op stub when MLO link stats support is not compiled in */
static inline void
dp_peer_set_local_link_id(struct dp_peer *peer)
{
}
2581 #endif
2582 
2583 /**
2584  * dp_peer_setup_wifi3() - initialize the peer
2585  * @soc_hdl: soc handle object
2586  * @vdev_id: vdev_id of vdev object
2587  * @peer_mac: Peer's mac address
2588  * @setup_info: peer setup info for MLO
2589  *
2590  * Return: QDF_STATUS
2591  */
QDF_STATUS
dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		    uint8_t *peer_mac,
		    struct cdp_peer_setup_info *setup_info)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_vdev *vdev = NULL;
	/* Takes a DP_MOD_ID_CDP reference; released at "fail" on all paths */
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);
	struct dp_peer *mld_peer = NULL;
	enum wlan_op_mode vdev_opmode;
	uint8_t lmac_peer_id_msb = 0;

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	vdev = peer->vdev;
	if (!vdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	/* save vdev related member in case vdev freed */
	vdev_opmode = vdev->opmode;
	pdev = vdev->pdev;
	/* Decide hash-based steering and the default REO destination ring */
	dp_peer_setup_get_reo_hash(vdev, setup_info,
				   &reo_dest, &hash_based,
				   &lmac_peer_id_msb);

	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
					   peer, vdev, vdev->vdev_id,
					   setup_info);
	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
		"hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, peer,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);

	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	if (!IS_MLO_DP_MLD_PEER(peer))
		dp_monitor_peer_tx_init(pdev, peer);

	/* Legacy (non-MLO) peers get the default per-peer setup */
	if (!setup_info)
		if (dp_peer_legacy_setup(soc, peer) !=
				QDF_STATUS_SUCCESS) {
			status = QDF_STATUS_E_RESOURCES;
			goto fail;
		}

	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
				soc->ctrl_psoc,
				peer->vdev->pdev->pdev_id,
				peer->mac_addr.raw,
				peer->vdev->vdev_id, hash_based, reo_dest,
				lmac_peer_id_msb);
	}

	qdf_atomic_set(&peer->is_default_route_set, 1);

	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_peer_err("peer mlo setup failed");
		qdf_assert_always(0);
	}

	if (vdev_opmode != wlan_op_mode_monitor) {
		/* In case of MLD peer, switch peer to mld peer and
		 * do peer_rx_init.
		 */
		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
		    IS_MLO_DP_LINK_PEER(peer)) {
			if (setup_info && setup_info->is_first_link) {
				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
				if (mld_peer)
					dp_peer_rx_init(pdev, mld_peer);
				else
					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
			}
		} else {
			dp_peer_rx_init_wrapper(pdev, peer, setup_info);
		}
	}

	dp_peer_set_local_link_id(peer);

	if (!IS_MLO_DP_MLD_PEER(peer))
		dp_peer_ppdu_delayed_ba_init(peer);

fail:
	/* Drop the reference taken by dp_peer_find_hash_find() above */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
2702 
2703 /**
2704  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
2705  * @txrx_soc: cdp soc handle
2706  * @ac: Access category
2707  * @value: timeout value in millisec
2708  *
2709  * Return: void
2710  */
void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
			     uint8_t ac, uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Thin wrapper: program the per-AC BA aging timeout in HAL */
	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
}
2718 
2719 /**
2720  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
2721  * @txrx_soc: cdp soc handle
2722  * @ac: access category
2723  * @value: timeout value in millisec
2724  *
2725  * Return: void
2726  */
void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
			     uint8_t ac, uint32_t *value)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Thin wrapper: read back the per-AC BA aging timeout from HAL */
	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
}
2734 
2735 /**
2736  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
2737  * @txrx_soc: cdp soc handle
2738  * @pdev_id: id of physical device object
2739  * @val: reo destination ring index (1 - 4)
2740  *
2741  * Return: QDF_STATUS
2742  */
2743 QDF_STATUS
dp_set_pdev_reo_dest(struct cdp_soc_t * txrx_soc,uint8_t pdev_id,enum cdp_host_reo_dest_ring val)2744 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
2745 		     enum cdp_host_reo_dest_ring val)
2746 {
2747 	struct dp_pdev *pdev =
2748 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2749 						   pdev_id);
2750 
2751 	if (pdev) {
2752 		pdev->reo_dest = val;
2753 		return QDF_STATUS_SUCCESS;
2754 	}
2755 
2756 	return QDF_STATUS_E_FAILURE;
2757 }
2758 
2759 /**
2760  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
2761  * @txrx_soc: cdp soc handle
2762  * @pdev_id: id of physical device object
2763  *
2764  * Return: reo destination ring index
2765  */
2766 enum cdp_host_reo_dest_ring
dp_get_pdev_reo_dest(struct cdp_soc_t * txrx_soc,uint8_t pdev_id)2767 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
2768 {
2769 	struct dp_pdev *pdev =
2770 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
2771 						   pdev_id);
2772 
2773 	if (pdev)
2774 		return pdev->reo_dest;
2775 	else
2776 		return cdp_host_reo_dest_ring_unknown;
2777 }
2778 
/* REO queue-stats command completion callback: accumulate the BAR rx
 * count into pdev stats and signal completion via stats_cmd_complete.
 * @cb_ctxt carries the dp_pdev the command was issued for.
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	/* pdev may have been detached while the command was in flight */
	if (!dp_check_pdev_exists(soc, pdev)) {
		dp_err_rl("pdev doesn't exist");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d",
			       queue_status->header.status);
		/* Still mark complete so the waiter is not stuck */
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}
2803 
2804 /**
2805  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
2806  * @soc: dp soc.
2807  * @pdev: dp pdev.
2808  *
2809  * Return: None.
2810  */
2811 void
dp_dump_wbm_idle_hptp(struct dp_soc * soc,struct dp_pdev * pdev)2812 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
2813 {
2814 	uint32_t hw_head;
2815 	uint32_t hw_tail;
2816 	struct dp_srng *srng;
2817 
2818 	if (!soc) {
2819 		dp_err("soc is NULL");
2820 		return;
2821 	}
2822 
2823 	if (!pdev) {
2824 		dp_err("pdev is NULL");
2825 		return;
2826 	}
2827 
2828 	srng = &pdev->soc->wbm_idle_link_ring;
2829 	if (!srng) {
2830 		dp_err("wbm_idle_link_ring srng is NULL");
2831 		return;
2832 	}
2833 
2834 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
2835 			&hw_tail, WBM_IDLE_LINK);
2836 
2837 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
2838 		 hw_head, hw_tail);
2839 }
2840 
2841 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Set the per-pass TX-completion and RX-reap packet budgets used by the
 * softirq time-limit feature (WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT builds).
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
2848 
2849 #else
2850 
/* No-op stub when WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is not compiled in */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
2856 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
2857 
2858 /**
2859  * dp_display_srng_info() - Dump the srng HP TP info
2860  * @soc_hdl: CDP Soc handle
2861  *
2862  * This function dumps the SW hp/tp values for the important rings.
2863  * HW hp/tp values are not being dumped, since it can lead to
2864  * READ NOC error when UMAC is in low power state. MCC does not have
2865  * device force wake working yet.
2866  *
2867  * Return: rings are empty
2868  */
dp_display_srng_info(struct cdp_soc_t * soc_hdl)2869 bool dp_display_srng_info(struct cdp_soc_t *soc_hdl)
2870 {
2871 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2872 	hal_soc_handle_t hal_soc = soc->hal_soc;
2873 	uint32_t hp, tp, i;
2874 	bool ret = true;
2875 
2876 	dp_info("SRNG HP-TP data:");
2877 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
2878 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
2879 				&tp, &hp);
2880 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2881 
2882 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
2883 		    INVALID_WBM_RING_NUM)
2884 			continue;
2885 
2886 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
2887 				&tp, &hp);
2888 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2889 	}
2890 
2891 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
2892 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
2893 				&tp, &hp);
2894 		if (hp != tp)
2895 			ret = false;
2896 
2897 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
2898 	}
2899 
2900 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
2901 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
2902 
2903 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
2904 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
2905 
2906 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
2907 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
2908 
2909 	return ret;
2910 }
2911 
2912 /**
2913  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
2914  * @psoc: dp soc handle
2915  * @pdev_id: id of DP_PDEV handle
2916  * @pcp: pcp value
2917  * @tid: tid value passed by the user
2918  *
2919  * Return: QDF_STATUS_SUCCESS on success
2920  */
dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,uint8_t pdev_id,uint8_t pcp,uint8_t tid)2921 QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
2922 					 uint8_t pdev_id,
2923 					 uint8_t pcp, uint8_t tid)
2924 {
2925 	struct dp_soc *soc = (struct dp_soc *)psoc;
2926 
2927 	soc->pcp_tid_map[pcp] = tid;
2928 
2929 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
2930 	return QDF_STATUS_SUCCESS;
2931 }
2932 
2933 /**
2934  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
2935  * @soc_hdl: DP soc handle
2936  * @vdev_id: id of DP_VDEV handle
2937  * @pcp: pcp value
2938  * @tid: tid value passed by the user
2939  *
2940  * Return: QDF_STATUS_SUCCESS on success
2941  */
dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,uint8_t pcp,uint8_t tid)2942 QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
2943 					 uint8_t vdev_id,
2944 					 uint8_t pcp, uint8_t tid)
2945 {
2946 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2947 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2948 						     DP_MOD_ID_CDP);
2949 
2950 	if (!vdev)
2951 		return QDF_STATUS_E_FAILURE;
2952 
2953 	vdev->pcp_tid_map[pcp] = tid;
2954 
2955 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2956 	return QDF_STATUS_SUCCESS;
2957 }
2958 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain leftover TX/RX SRNG entries before low power entry
 * @soc_handle: CDP soc handle
 * @rx_only: when non-zero, only interrupt contexts that have an RX ring
 *	     mask are serviced
 *
 * Return: QDF_STATUS_SUCCESS when all pending DP tasks completed, the
 *	   error from hif_try_complete_dp_tasks() otherwise
 */
QDF_STATUS dp_drain_txrx(struct cdp_soc_t *soc_handle, uint8_t rx_only)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;
	int cpu = dp_srng_get_cpu();
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Remember the configured limits so they can be restored below */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		if (rx_only && !soc->intr_ctx[i].rx_ring_mask)
			continue;
		soc->arch_ops.dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
	}

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	status = hif_try_complete_dp_tasks(soc->hif_handle);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("Failed to complete DP tasks");
		return status;
	}

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u", val);

	return status;
}
#endif
3004 
3005 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
3006 /**
3007  * dp_flush_ring_hptp() - Update ring shadow
3008  *			  register HP/TP address when runtime
3009  *                        resume
3010  * @soc: DP soc context
3011  * @hal_srng: srng
3012  *
3013  * Return: None
3014  */
dp_flush_ring_hptp(struct dp_soc * soc,hal_ring_handle_t hal_srng)3015 static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
3016 {
3017 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
3018 						 HAL_SRNG_FLUSH_EVENT)) {
3019 		/* Acquire the lock */
3020 		hal_srng_access_start(soc->hal_soc, hal_srng);
3021 
3022 		hal_srng_access_end(soc->hal_soc, hal_srng);
3023 
3024 		hal_srng_set_flush_last_ts(hal_srng);
3025 
3026 		dp_debug("flushed");
3027 	}
3028 }
3029 
dp_update_ring_hptp(struct dp_soc * soc,bool force_flush_tx)3030 void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
3031 {
3032 	 uint8_t i;
3033 
3034 	if (force_flush_tx) {
3035 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3036 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
3037 					   HAL_SRNG_FLUSH_EVENT);
3038 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
3039 		}
3040 
3041 		return;
3042 	}
3043 
3044 	for (i = 0; i < soc->num_tcl_data_rings; i++)
3045 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
3046 
3047 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
3048 }
3049 #endif
3050 
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_flush_tcl_ring() - flush TCL ring hp
 * @pdev: dp pdev
 * @ring_id: TCL ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_ring_handle_t hal_ring_hdl =
			soc->tcl_data_ring[ring_id].hal_srng;
	int ret;

	ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
	if (ret)
		return ret;

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		/* Runtime-PM get failed: end the access without writing
		 * HP to HW and defer the flush via the SRNG flush event
		 * (serviced on runtime resume).
		 */
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		return ret;
	}

	/* access_end writes HP to HW, i.e. performs the actual flush */
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}
#else
/* SW latency manager disabled: nothing to flush */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
3089 
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 100

/**
 * dp_rx_hw_stats_cb() - request rx hw stats response callback
 * @soc: soc handle
 * @cb_ctxt: callback context
 * @reo_status: reo command response status
 *
 * Invoked once per TID stats response; the last response frees the
 * shared rx_hw_stats context and wakes the waiting requester.
 *
 * Return: None
 */
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			      union hal_reo_status *reo_status)
{
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	bool is_query_timeout;

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	/* Snapshot under the lock; the requester may set it on timeout */
	is_query_timeout = soc->rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt if all pending tid stats query is received */
	if (qdf_atomic_dec_and_test(&soc->rx_hw_stats->pending_tid_stats_cnt)) {
		if (!is_query_timeout) {
			/* Requester is still waiting; wake it up */
			qdf_event_set(&soc->rx_hw_stats_event);
			soc->is_last_stats_ctx_init = false;
		}

		qdf_mem_free(soc->rx_hw_stats);
		soc->rx_hw_stats = NULL;
	}

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_info("REO stats failure %d",
			queue_status->header.status);
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		return;
	}

	/* After a timeout the requester restored its saved counters, so
	 * late responses must not be accumulated.
	 */
	if (!is_query_timeout) {
		soc->ext_stats.rx_mpdu_received +=
					queue_status->mpdu_frms_cnt;
		soc->ext_stats.rx_mpdu_missed +=
					queue_status->hole_cnt;
	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
3136 
/**
 * dp_request_rx_hw_stats() - request rx hardware stats
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 *
 * Sends per-TID REO queue stats commands for the vdev's BSS peer and
 * blocks (up to DP_REO_STATUS_STATS_TIMEOUT ms) for dp_rx_hw_stats_cb()
 * to collect all responses.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_ALREADY when a
 *	   query is in flight, QDF_STATUS_E_INVAL on bad vdev/peer or
 *	   send failure, or the wait-event error on timeout
 */
QDF_STATUS
dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_peer *peer = NULL;
	QDF_STATUS status;
	int rx_stats_sent_cnt = 0;
	uint32_t last_rx_mpdu_received;
	uint32_t last_rx_mpdu_missed;

	/* Only one query may be outstanding at a time */
	if (soc->rx_hw_stats) {
		dp_err_rl("Stats already requested");
		status = QDF_STATUS_E_ALREADY;
		goto out;
	}

	if (!vdev) {
		dp_err("vdev is null for vdev_id: %u", vdev_id);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL");
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	/* Context shared with dp_rx_hw_stats_cb(); freed by the callback
	 * once the last pending TID response arrives.
	 */
	soc->rx_hw_stats = qdf_mem_malloc(sizeof(*soc->rx_hw_stats));

	if (!soc->rx_hw_stats) {
		dp_err("malloc failed for hw stats structure");
		status = QDF_STATUS_E_INVAL;
		goto out;
	}

	qdf_event_reset(&soc->rx_hw_stats_event);
	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	/* save the last soc cumulative stats and reset it to 0 */
	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
	soc->ext_stats.rx_mpdu_received = 0;
	soc->ext_stats.rx_mpdu_missed = 0;

	dp_debug("HW stats query start");
	rx_stats_sent_cnt =
		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, soc->rx_hw_stats);
	if (!rx_stats_sent_cnt) {
		dp_err("no tid stats sent successfully");
		qdf_mem_free(soc->rx_hw_stats);
		soc->rx_hw_stats = NULL;
		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
		status = QDF_STATUS_E_INVAL;
		goto out;
	}
	qdf_atomic_set(&soc->rx_hw_stats->pending_tid_stats_cnt,
		       rx_stats_sent_cnt);
	soc->rx_hw_stats->is_query_timeout = false;
	soc->is_last_stats_ctx_init = true;
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);

	/* Block until the callback signals completion or timeout */
	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
				       DP_REO_STATUS_STATS_TIMEOUT);
	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);

	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
	if (status != QDF_STATUS_SUCCESS) {
		if (soc->rx_hw_stats) {
			dp_info("partial rx hw stats event collected with %d",
				qdf_atomic_read(
				  &soc->rx_hw_stats->pending_tid_stats_cnt));
			/* Tell late callbacks not to accumulate or signal */
			if (soc->is_last_stats_ctx_init)
				soc->rx_hw_stats->is_query_timeout = true;
		}

		/*
		 * If query timeout happened, use the last saved stats
		 * for this time query.
		 */
		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);

	}
	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);

out:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);

	return status;
}
3243 
3244 /**
3245  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
3246  * @soc_hdl: soc handle
3247  *
3248  * Return: None
3249  */
dp_reset_rx_hw_ext_stats(struct cdp_soc_t * soc_hdl)3250 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
3251 {
3252 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3253 
3254 	soc->ext_stats.rx_mpdu_received = 0;
3255 	soc->ext_stats.rx_mpdu_missed = 0;
3256 }
3257 #endif /* WLAN_FEATURE_STATS_EXT */
3258 
dp_get_tx_rings_grp_bitmap(struct cdp_soc_t * soc_hdl)3259 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
3260 {
3261 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
3262 
3263 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
3264 }
3265 
dp_soc_set_txrx_ring_map(struct dp_soc * soc)3266 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
3267 {
3268 	uint32_t i;
3269 
3270 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3271 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
3272 	}
3273 }
3274 
3275 qdf_export_symbol(dp_soc_set_txrx_ring_map);
3276 
dp_soc_cfg_dump(struct dp_soc * soc,uint32_t target_type)3277 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
3278 {
3279 	dp_init_info("DP soc Dump for Target = %d", target_type);
3280 	dp_init_info("ast_override_support = %d da_war_enabled = %d",
3281 		     soc->ast_override_support, soc->da_war_enabled);
3282 
3283 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
3284 }
3285 
/**
 * dp_soc_cfg_init() - initialize target specific configuration
 *		       during dp_soc_init
 * @soc: dp soc handle
 *
 * Applies per-target overrides (ring sizes, WARs, feature flags) to the
 * soc and its wlan_cfg context. Asserts on an unknown target type.
 *
 * Return: none
 */
static void dp_soc_cfg_init(struct dp_soc *soc)
{
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		break;
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		/* In monitor mode clear RX ring masks for all contexts */
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_PEACH:
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;

		/* Monitor mode: clear RX masks; rxdma2host only when the
		 * monitor status path is polled rather than interrupt-driven
		 */
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				if (dp_is_monitor_mode_using_poll(soc))
					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}

		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->da_war_enabled = true;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->da_war_enabled = false;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCN9000:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->lmac_polled_mode = 0;
		soc->wbm_release_desc_rx_sg_support = 1;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCN9160:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
		soc->disable_mac1_intr = 1;
		soc->disable_mac2_intr = 1;
		soc->wbm_release_desc_rx_sg_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
		soc->umac_reset_supported = true;
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->wbm_release_desc_rx_sg_support = 1;
		soc->rxdma2sw_rings_not_supported = 1;
		soc->wbm_sg_last_msdu_war = 1;
		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
						  CFG_DP_HOST_AST_DB_ENABLE);
		soc->features.wds_ext_ast_override_enable = true;
		break;
	case TARGET_TYPE_QCA5332:
	case TARGET_TYPE_QCN6432:
		soc->umac_reset_supported = true;
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->wbm_release_desc_rx_sg_support = 1;
		soc->rxdma2sw_rings_not_supported = 1;
		soc->wbm_sg_last_msdu_war = 1;
		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
						  CFG_DP_HOST_AST_DB_ENABLE);
		soc->features.wds_ext_ast_override_enable = true;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
	dp_soc_cfg_dump(soc, target_type);
}
3423 
/**
 * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
 * @soc: Opaque DP SOC handle
 *
 * Return: none
 */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
{
	/* Fetch the AP MLD netdev mode from the control-plane config ops */
	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		soc->mld_mode_ap =
		soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
					CDP_CFG_MLD_NETDEV_MODE_AP);
	}
	dp_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
}
#else
/* 11BE MLO multi-chip support not compiled in: nothing to fetch */
static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
{
	(void)soc;
}
#endif
3446 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_soc_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_soc
 * @soc: Datapath soc handle
 *
 * Return: none
 */
static inline
void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
{
	/* Honour the cfg knob for vdev stats HW offload */
	soc->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
/* HW offload of vdev stats not compiled in: force SW stats path */
static inline
void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
{
	soc->hw_txrx_stats_en = 0;
}
#endif
3467 
/**
 * dp_soc_init() - Initialize txrx SOC
 * @soc: Opaque DP SOC handle
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Attaches HTT, selects the interrupt mode, fills interrupt masks,
 * initializes the WBM idle link and all SOC SRNGs, the TX descriptor
 * pools and assorted locks/lists. On any failure the already-acquired
 * resources are unwound in reverse order via the fail labels.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle)
{
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
	bool is_monitor_mode = false;
	uint8_t i;
	int num_dp_msi;
	bool ppeds_attached = false;

	htt_soc = htt_soc_attach(soc, htc_handle);
	if (!htt_soc)
		goto fail1;

	soc->htt_handle = htt_soc;

	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	htt_set_htc_handle(htt_soc, htc_handle);

	/* Target-specific overrides must be in place before ring setup */
	dp_soc_cfg_init(soc);

	dp_monitor_soc_cfg_init(soc);
	/* Reset/Initialize wbm sg list and flags */
	dp_rx_wbm_sg_list_reset(soc);

	/* Note: Any SRNG ring initialization should happen only after
	 * Interrupt mode is set and followed by filling up the
	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
	 */
	dp_soc_set_interrupt_mode(soc);
	if (soc->cdp_soc.ol_ops->get_con_mode &&
	    soc->cdp_soc.ol_ops->get_con_mode() ==
	    QDF_GLOBAL_MONITOR_MODE) {
		is_monitor_mode = true;
		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
	} else {
		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
	}

	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
	if (num_dp_msi < 0) {
		dp_init_err("%pK: dp_interrupt assignment failed", soc);
		goto fail3;
	}

	if (soc->arch_ops.ppeds_handle_attached)
		ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);

	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
				     soc->intr_mode, is_monitor_mode,
				     ppeds_attached,
				     soc->umac_reset_supported);

	/* initialize WBM_IDLE_LINK ring */
	if (dp_hw_link_desc_ring_init(soc)) {
		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
		goto fail3;
	}

	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);

	if (dp_soc_srng_init(soc)) {
		dp_init_err("%pK: dp_soc_srng_init failed", soc);
		goto fail4;
	}

	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
			       htt_get_htc_handle(htt_soc),
			       soc->hal_soc, soc->osdev) == NULL)
		goto fail5;

	/* Initialize descriptors in TCL Rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[i].hal_srng);
	}

	if (dp_soc_tx_desc_sw_pools_init(soc)) {
		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
		goto fail6;
	}

	if (soc->arch_ops.txrx_soc_ppeds_start) {
		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
			dp_init_err("%pK: ppeds start failed", soc);
			goto fail7;
		}
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
			   cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
#endif
	soc->cce_disable = false;
	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;

	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
	qdf_spinlock_create(&soc->vdev_map_lock);
	qdf_atomic_init(&soc->num_tx_outstanding);
	qdf_atomic_init(&soc->num_tx_exception);
	soc->num_tx_allowed =
		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
	soc->num_tx_spl_allowed =
		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
	/* Regular TX budget is the device limit minus the special-frame share */
	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL)
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
	 * and IPQ5018 WMAC2 is not there in these platforms.
	 */
	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
	    soc->disable_mac2_intr)
		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);

	/*
	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
	 * WMAC1 is not there in this platform.
	 */
	if (soc->disable_mac1_intr)
		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
	soc->rx.defrag.next_flush_ms = 0;
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

	dp_monitor_soc_init(soc);

	/* Mark common init done before enabling async consumers */
	qdf_atomic_set(&soc->cmn_init_done, 1);

	qdf_nbuf_queue_init(&soc->htt_stats.msg);

	qdf_spinlock_create(&soc->ast_lock);
	dp_peer_mec_spinlock_create(soc);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
	INIT_RX_HW_STATS_LOCK(soc);

	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
	/* fill the tx/rx cpu ring map*/
	dp_soc_set_txrx_ring_map(soc);

	TAILQ_INIT(&soc->inactive_peer_list);
	qdf_spinlock_create(&soc->inactive_peer_list_lock);
	TAILQ_INIT(&soc->inactive_vdev_list);
	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	dp_reo_desc_deferred_freelist_create(soc);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	soc->vdev_stats_id_map = 0;

	dp_soc_hw_txrx_stats_init(soc);

	dp_soc_get_ap_mld_mode(soc);

	return soc;
/* Error unwinding: reverse order of the acquisitions above */
fail7:
	dp_soc_tx_desc_sw_pools_deinit(soc);
fail6:
	htt_soc_htc_dealloc(soc->htt_handle);
fail5:
	dp_soc_srng_deinit(soc);
fail4:
	dp_hw_link_desc_ring_deinit(soc);
fail3:
	htt_htc_pkt_pool_free(htt_soc);
fail2:
	htt_soc_detach(htt_soc);
fail1:
	return NULL;
}
3676 
3677 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
dp_soc_tcl_cmd_cred_srng_init(struct dp_soc * soc)3678 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
3679 {
3680 	QDF_STATUS status;
3681 
3682 	if (soc->init_tcl_cmd_cred_ring) {
3683 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
3684 				       TCL_CMD_CREDIT, 0, 0);
3685 		if (QDF_IS_STATUS_ERROR(status))
3686 			return status;
3687 
3688 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3689 				  soc->tcl_cmd_credit_ring.alloc_size,
3690 				  soc->ctrl_psoc,
3691 				  WLAN_MD_DP_SRNG_TCL_CMD,
3692 				  "wbm_desc_rel_ring");
3693 	}
3694 
3695 	return QDF_STATUS_SUCCESS;
3696 }
3697 
dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc * soc)3698 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
3699 {
3700 	if (soc->init_tcl_cmd_cred_ring) {
3701 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
3702 				     soc->tcl_cmd_credit_ring.alloc_size,
3703 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
3704 				     "wbm_desc_rel_ring");
3705 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
3706 			       TCL_CMD_CREDIT, 0);
3707 	}
3708 }
3709 
dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc * soc)3710 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
3711 {
3712 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3713 	uint32_t entries;
3714 	QDF_STATUS status;
3715 
3716 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
3717 	if (soc->init_tcl_cmd_cred_ring) {
3718 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
3719 				       TCL_CMD_CREDIT, entries, 0);
3720 		if (QDF_IS_STATUS_ERROR(status))
3721 			return status;
3722 	}
3723 
3724 	return QDF_STATUS_SUCCESS;
3725 }
3726 
dp_soc_tcl_cmd_cred_srng_free(struct dp_soc * soc)3727 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
3728 {
3729 	if (soc->init_tcl_cmd_cred_ring)
3730 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
3731 }
3732 
dp_tx_init_cmd_credit_ring(struct dp_soc * soc)3733 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
3734 {
3735 	if (soc->init_tcl_cmd_cred_ring)
3736 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
3737 					    soc->tcl_cmd_credit_ring.hal_srng);
3738 }
3739 #else
dp_soc_tcl_cmd_cred_srng_init(struct dp_soc * soc)3740 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
3741 {
3742 	return QDF_STATUS_SUCCESS;
3743 }
3744 
dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc * soc)3745 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
3746 {
3747 }
3748 
dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc * soc)3749 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
3750 {
3751 	return QDF_STATUS_SUCCESS;
3752 }
3753 
dp_soc_tcl_cmd_cred_srng_free(struct dp_soc * soc)3754 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
3755 {
3756 }
3757 
dp_tx_init_cmd_credit_ring(struct dp_soc * soc)3758 inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
3759 {
3760 }
3761 #endif
3762 
3763 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
dp_soc_tcl_status_srng_init(struct dp_soc * soc)3764 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
3765 {
3766 	QDF_STATUS status;
3767 
3768 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
3769 	if (QDF_IS_STATUS_ERROR(status))
3770 		return status;
3771 
3772 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
3773 			  soc->tcl_status_ring.alloc_size,
3774 			  soc->ctrl_psoc,
3775 			  WLAN_MD_DP_SRNG_TCL_STATUS,
3776 			  "wbm_desc_rel_ring");
3777 
3778 	return QDF_STATUS_SUCCESS;
3779 }
3780 
dp_soc_tcl_status_srng_deinit(struct dp_soc * soc)3781 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
3782 {
3783 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
3784 			     soc->tcl_status_ring.alloc_size,
3785 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
3786 			     "wbm_desc_rel_ring");
3787 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3788 }
3789 
dp_soc_tcl_status_srng_alloc(struct dp_soc * soc)3790 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
3791 {
3792 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3793 	uint32_t entries;
3794 	QDF_STATUS status = QDF_STATUS_SUCCESS;
3795 
3796 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
3797 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
3798 			       TCL_STATUS, entries, 0);
3799 
3800 	return status;
3801 }
3802 
/* Release the memory backing the TCL status ring */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
3807 #else
/* WLAN_DP_DISABLE_TCL_STATUS_SRNG: TCL status ring disabled,
 * init is a no-op that reports success.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
3812 
/* Stub: TCL status ring disabled, nothing to de-initialize */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}
3816 
/* Stub: TCL status ring disabled, no memory to allocate */
static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
3821 
/* Stub: TCL status ring disabled, no memory to free */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
3825 #endif
3826 
3827 /**
3828  * dp_soc_srng_deinit() - de-initialize soc srng rings
3829  * @soc: Datapath soc handle
3830  *
3831  */
dp_soc_srng_deinit(struct dp_soc * soc)3832 void dp_soc_srng_deinit(struct dp_soc *soc)
3833 {
3834 	uint32_t i;
3835 
3836 	if (soc->arch_ops.txrx_soc_srng_deinit)
3837 		soc->arch_ops.txrx_soc_srng_deinit(soc);
3838 
3839 	/* Free the ring memories */
3840 	/* Common rings */
3841 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3842 			     soc->wbm_desc_rel_ring.alloc_size,
3843 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
3844 			     "wbm_desc_rel_ring");
3845 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3846 	dp_ssr_dump_srng_unregister("wbm_desc_rel_ring", -1);
3847 
3848 	/* Tx data rings */
3849 	for (i = 0; i < soc->num_tcl_data_rings; i++)
3850 		dp_deinit_tx_pair_by_index(soc, i);
3851 
3852 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
3853 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
3854 		dp_ipa_deinit_alt_tx_ring(soc);
3855 	}
3856 
3857 	/* TCL command and status rings */
3858 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
3859 	dp_soc_tcl_status_srng_deinit(soc);
3860 
3861 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
3862 		/* TODO: Get number of rings and ring sizes
3863 		 * from wlan_cfg
3864 		 */
3865 		dp_ssr_dump_srng_unregister("reo_dest_ring", i);
3866 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
3867 				     soc->reo_dest_ring[i].alloc_size,
3868 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
3869 				     "reo_dest_ring");
3870 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
3871 	}
3872 
3873 	dp_ssr_dump_srng_unregister("reo_reinject_ring", -1);
3874 	/* REO reinjection ring */
3875 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
3876 			     soc->reo_reinject_ring.alloc_size,
3877 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
3878 			     "reo_reinject_ring");
3879 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3880 
3881 	dp_ssr_dump_srng_unregister("rx_rel_ring", -1);
3882 	/* Rx release ring */
3883 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
3884 			     soc->rx_rel_ring.alloc_size,
3885 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
3886 			     "reo_release_ring");
3887 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3888 
3889 	/* Rx exception ring */
3890 	/* TODO: Better to store ring_type and ring_num in
3891 	 * dp_srng during setup
3892 	 */
3893 	dp_ssr_dump_srng_unregister("reo_exception_ring", -1);
3894 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
3895 			     soc->reo_exception_ring.alloc_size,
3896 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
3897 			     "reo_exception_ring");
3898 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3899 
3900 	/* REO command and status rings */
3901 	dp_ssr_dump_srng_unregister("reo_cmd_ring", -1);
3902 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
3903 			     soc->reo_cmd_ring.alloc_size,
3904 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
3905 			     "reo_cmd_ring");
3906 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3907 	dp_ssr_dump_srng_unregister("reo_status_ring", -1);
3908 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
3909 			     soc->reo_status_ring.alloc_size,
3910 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
3911 			     "reo_status_ring");
3912 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3913 }
3914 
3915 /**
3916  * dp_soc_srng_init() - Initialize soc level srng rings
3917  * @soc: Datapath soc handle
3918  *
3919  * Return: QDF_STATUS_SUCCESS on success
3920  *	   QDF_STATUS_E_FAILURE on failure
3921  */
dp_soc_srng_init(struct dp_soc * soc)3922 QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
3923 {
3924 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3925 	uint8_t i;
3926 	uint8_t wbm2_sw_rx_rel_ring_id;
3927 
3928 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3929 
3930 	dp_enable_verbose_debug(soc);
3931 
3932 	/* WBM descriptor release ring */
3933 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
3934 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
3935 		goto fail1;
3936 	}
3937 	dp_ssr_dump_srng_register("wbm_desc_rel_ring",
3938 				  &soc->wbm_desc_rel_ring, -1);
3939 
3940 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
3941 			  soc->wbm_desc_rel_ring.alloc_size,
3942 			  soc->ctrl_psoc,
3943 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
3944 			  "wbm_desc_rel_ring");
3945 
3946 	/* TCL command and status rings */
3947 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
3948 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
3949 		goto fail1;
3950 	}
3951 
3952 	if (dp_soc_tcl_status_srng_init(soc)) {
3953 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
3954 		goto fail1;
3955 	}
3956 
3957 	/* REO reinjection ring */
3958 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
3959 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
3960 		goto fail1;
3961 	}
3962 	dp_ssr_dump_srng_register("reo_reinject_ring",
3963 				  &soc->reo_reinject_ring, -1);
3964 
3965 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
3966 			  soc->reo_reinject_ring.alloc_size,
3967 			  soc->ctrl_psoc,
3968 			  WLAN_MD_DP_SRNG_REO_REINJECT,
3969 			  "reo_reinject_ring");
3970 
3971 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
3972 	/* Rx release ring */
3973 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
3974 			 wbm2_sw_rx_rel_ring_id, 0)) {
3975 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
3976 		goto fail1;
3977 	}
3978 	dp_ssr_dump_srng_register("rx_rel_ring", &soc->rx_rel_ring, -1);
3979 
3980 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
3981 			  soc->rx_rel_ring.alloc_size,
3982 			  soc->ctrl_psoc,
3983 			  WLAN_MD_DP_SRNG_RX_REL,
3984 			  "reo_release_ring");
3985 
3986 	/* Rx exception ring */
3987 	if (dp_srng_init(soc, &soc->reo_exception_ring,
3988 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
3989 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
3990 		goto fail1;
3991 	}
3992 	dp_ssr_dump_srng_register("reo_exception_ring",
3993 				  &soc->reo_exception_ring, -1);
3994 
3995 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
3996 			  soc->reo_exception_ring.alloc_size,
3997 			  soc->ctrl_psoc,
3998 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
3999 			  "reo_exception_ring");
4000 
4001 	/* REO command and status rings */
4002 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
4003 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
4004 		goto fail1;
4005 	}
4006 	dp_ssr_dump_srng_register("reo_cmd_ring", &soc->reo_cmd_ring, -1);
4007 
4008 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
4009 			  soc->reo_cmd_ring.alloc_size,
4010 			  soc->ctrl_psoc,
4011 			  WLAN_MD_DP_SRNG_REO_CMD,
4012 			  "reo_cmd_ring");
4013 
4014 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
4015 	TAILQ_INIT(&soc->rx.reo_cmd_list);
4016 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
4017 
4018 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
4019 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
4020 		goto fail1;
4021 	}
4022 	dp_ssr_dump_srng_register("reo_status_ring", &soc->reo_status_ring, -1);
4023 
4024 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
4025 			  soc->reo_status_ring.alloc_size,
4026 			  soc->ctrl_psoc,
4027 			  WLAN_MD_DP_SRNG_REO_STATUS,
4028 			  "reo_status_ring");
4029 
4030 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4031 		if (dp_init_tx_ring_pair_by_index(soc, i))
4032 			goto fail1;
4033 	}
4034 
4035 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4036 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4037 			goto fail1;
4038 
4039 		if (dp_ipa_init_alt_tx_ring(soc))
4040 			goto fail1;
4041 	}
4042 
4043 	dp_create_ext_stats_event(soc);
4044 
4045 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4046 		/* Initialize REO destination ring */
4047 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
4048 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
4049 			goto fail1;
4050 		}
4051 
4052 		dp_ssr_dump_srng_register("reo_dest_ring",
4053 					  &soc->reo_dest_ring[i], i);
4054 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
4055 				  soc->reo_dest_ring[i].alloc_size,
4056 				  soc->ctrl_psoc,
4057 				  WLAN_MD_DP_SRNG_REO_DEST,
4058 				  "reo_dest_ring");
4059 	}
4060 
4061 	if (soc->arch_ops.txrx_soc_srng_init) {
4062 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
4063 			dp_init_err("%pK: dp_srng_init failed for arch rings",
4064 				    soc);
4065 			goto fail1;
4066 		}
4067 	}
4068 
4069 	return QDF_STATUS_SUCCESS;
4070 fail1:
4071 	/*
4072 	 * Cleanup will be done as part of soc_detach, which will
4073 	 * be called on pdev attach failure
4074 	 */
4075 	dp_soc_srng_deinit(soc);
4076 	return QDF_STATUS_E_FAILURE;
4077 }
4078 
4079 /**
4080  * dp_soc_srng_free() - free soc level srng rings
4081  * @soc: Datapath soc handle
4082  *
4083  */
dp_soc_srng_free(struct dp_soc * soc)4084 void dp_soc_srng_free(struct dp_soc *soc)
4085 {
4086 	uint32_t i;
4087 
4088 	if (soc->arch_ops.txrx_soc_srng_free)
4089 		soc->arch_ops.txrx_soc_srng_free(soc);
4090 
4091 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
4092 
4093 	for (i = 0; i < soc->num_tcl_data_rings; i++)
4094 		dp_free_tx_ring_pair_by_index(soc, i);
4095 
4096 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
4097 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4098 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
4099 		dp_ipa_free_alt_tx_ring(soc);
4100 	}
4101 
4102 	dp_soc_tcl_cmd_cred_srng_free(soc);
4103 	dp_soc_tcl_status_srng_free(soc);
4104 
4105 	for (i = 0; i < soc->num_reo_dest_rings; i++)
4106 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
4107 
4108 	dp_srng_free(soc, &soc->reo_reinject_ring);
4109 	dp_srng_free(soc, &soc->rx_rel_ring);
4110 
4111 	dp_srng_free(soc, &soc->reo_exception_ring);
4112 
4113 	dp_srng_free(soc, &soc->reo_cmd_ring);
4114 	dp_srng_free(soc, &soc->reo_status_ring);
4115 }
4116 
4117 /**
4118  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
4119  * @soc: Datapath soc handle
4120  *
4121  * Return: QDF_STATUS_SUCCESS on success
4122  *	   QDF_STATUS_E_NOMEM on failure
4123  */
dp_soc_srng_alloc(struct dp_soc * soc)4124 QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
4125 {
4126 	uint32_t entries;
4127 	uint32_t i;
4128 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4129 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
4130 	uint32_t reo_dst_ring_size;
4131 
4132 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4133 
4134 	/* sw2wbm link descriptor release ring */
4135 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
4136 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
4137 			  entries, 0)) {
4138 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
4139 		goto fail1;
4140 	}
4141 
4142 	/* TCL command and status rings */
4143 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
4144 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
4145 		goto fail1;
4146 	}
4147 
4148 	if (dp_soc_tcl_status_srng_alloc(soc)) {
4149 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
4150 		goto fail1;
4151 	}
4152 
4153 	/* REO reinjection ring */
4154 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
4155 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
4156 			  entries, 0)) {
4157 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
4158 		goto fail1;
4159 	}
4160 
4161 	/* Rx release ring */
4162 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
4163 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
4164 			  entries, 0)) {
4165 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
4166 		goto fail1;
4167 	}
4168 
4169 	/* Rx exception ring */
4170 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
4171 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
4172 			  entries, 0)) {
4173 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
4174 		goto fail1;
4175 	}
4176 
4177 	/* REO command and status rings */
4178 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
4179 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
4180 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
4181 		goto fail1;
4182 	}
4183 
4184 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
4185 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
4186 			  entries, 0)) {
4187 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
4188 		goto fail1;
4189 	}
4190 
4191 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
4192 
4193 	/* Disable cached desc if NSS offload is enabled */
4194 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4195 		cached = 0;
4196 
4197 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
4198 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
4199 			goto fail1;
4200 	}
4201 
4202 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
4203 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4204 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
4205 			goto fail1;
4206 
4207 		if (dp_ipa_alloc_alt_tx_ring(soc))
4208 			goto fail1;
4209 	}
4210 
4211 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
4212 		/* Setup REO destination ring */
4213 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
4214 				  reo_dst_ring_size, cached)) {
4215 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
4216 			goto fail1;
4217 		}
4218 	}
4219 
4220 	if (soc->arch_ops.txrx_soc_srng_alloc) {
4221 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
4222 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
4223 				    soc);
4224 			goto fail1;
4225 		}
4226 	}
4227 
4228 	return QDF_STATUS_SUCCESS;
4229 
4230 fail1:
4231 	dp_soc_srng_free(soc);
4232 	return QDF_STATUS_E_NOMEM;
4233 }
4234 
4235 /**
4236  * dp_soc_cfg_attach() - set target specific configuration in
4237  *			 dp soc cfg.
4238  * @soc: dp soc handle
4239  */
dp_soc_cfg_attach(struct dp_soc * soc)4240 void dp_soc_cfg_attach(struct dp_soc *soc)
4241 {
4242 	int target_type;
4243 	int nss_cfg = 0;
4244 
4245 	target_type = hal_get_target_type(soc->hal_soc);
4246 	switch (target_type) {
4247 	case TARGET_TYPE_QCA6290:
4248 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4249 					       REO_DST_RING_SIZE_QCA6290);
4250 		break;
4251 	case TARGET_TYPE_QCA6390:
4252 	case TARGET_TYPE_QCA6490:
4253 	case TARGET_TYPE_QCA6750:
4254 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
4255 					       REO_DST_RING_SIZE_QCA6290);
4256 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4257 		break;
4258 	case TARGET_TYPE_KIWI:
4259 	case TARGET_TYPE_MANGO:
4260 	case TARGET_TYPE_PEACH:
4261 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4262 		break;
4263 	case TARGET_TYPE_QCA8074:
4264 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4265 		break;
4266 	case TARGET_TYPE_QCA8074V2:
4267 	case TARGET_TYPE_QCA6018:
4268 	case TARGET_TYPE_QCA9574:
4269 	case TARGET_TYPE_QCN6122:
4270 	case TARGET_TYPE_QCA5018:
4271 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4272 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4273 		break;
4274 	case TARGET_TYPE_QCN9160:
4275 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4276 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
4277 		break;
4278 	case TARGET_TYPE_QCN9000:
4279 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4280 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4281 		break;
4282 	case TARGET_TYPE_QCN9224:
4283 	case TARGET_TYPE_QCA5332:
4284 	case TARGET_TYPE_QCN6432:
4285 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
4286 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
4287 		break;
4288 	default:
4289 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
4290 		qdf_assert_always(0);
4291 		break;
4292 	}
4293 
4294 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
4295 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
4296 
4297 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
4298 
4299 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
4300 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
4301 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
4302 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
4303 		wlan_cfg_set_num_tx_spl_desc(soc->wlan_cfg_ctx, 0);
4304 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
4305 		soc->init_tcl_cmd_cred_ring = false;
4306 		soc->num_tcl_data_rings =
4307 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
4308 		soc->num_reo_dest_rings =
4309 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
4310 
4311 	} else {
4312 		soc->init_tcl_cmd_cred_ring = true;
4313 		soc->num_tx_comp_rings =
4314 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
4315 		soc->num_tcl_data_rings =
4316 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
4317 		soc->num_reo_dest_rings =
4318 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4319 	}
4320 
4321 }
4322 
dp_pdev_set_default_reo(struct dp_pdev * pdev)4323 void dp_pdev_set_default_reo(struct dp_pdev *pdev)
4324 {
4325 	struct dp_soc *soc = pdev->soc;
4326 
4327 	switch (pdev->pdev_id) {
4328 	case 0:
4329 		pdev->reo_dest =
4330 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
4331 		break;
4332 
4333 	case 1:
4334 		pdev->reo_dest =
4335 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
4336 		break;
4337 
4338 	case 2:
4339 		pdev->reo_dest =
4340 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
4341 		break;
4342 
4343 	default:
4344 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
4345 			    soc, pdev->pdev_id);
4346 		break;
4347 	}
4348 }
4349 
4350 #ifdef WLAN_SUPPORT_DPDK
/**
 * dp_soc_reset_dpdk_intr_mask() - Clear host interrupt masks for rings
 *				   handed over to DPDK
 * @soc: Datapath soc handle
 *
 * For each ring class (TX completion, REO destination, Rx buffer
 * refill, Rx error) this finds the interrupt group that services each
 * ring and clears that ring's bit (or the whole mask for Rx error) so
 * the host no longer takes interrupts for it.
 */
void dp_soc_reset_dpdk_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* NOTE(review): this loop iterates WLAN_CFG_NUM_TCL_DATA_RINGS,
	 * not num_ring assigned just above — confirm that is intended.
	 */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx,
						 group_number);

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx,
					  group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx,
						  group_number);

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx,
					  group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	for (j = 0; j < MAX_PDEV_CNT; j++) {
		/* refill rings are indexed by lmac, not pdev */
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
							  group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
						  group_number, mask);
	}

	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	/* num_ring here is soc->num_reo_dest_rings (set above) */
	for (j = 0; j < num_ring; j++) {
		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		/* clear the whole rx-err mask for this group */
		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
4468 #endif
4469