xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision a0751b2b6be239b5ad7274f9e927ad1360ac506e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #ifdef DP_RATETABLE_SUPPORT
36 #include "dp_ratetable.h"
37 #endif
38 #include <cdp_txrx_handle.h>
39 #include <wlan_cfg.h>
40 #include <wlan_utility.h>
41 #include "cdp_txrx_cmn_struct.h"
42 #include "cdp_txrx_stats_struct.h"
43 #include "cdp_txrx_cmn_reg.h"
44 #include <qdf_util.h>
45 #include "dp_peer.h"
46 #include "htt_stats.h"
47 #include "dp_htt.h"
48 #ifdef WLAN_SUPPORT_RX_FISA
49 #include <dp_fisa_rx.h>
50 #endif
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 
55 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
56 #include "cdp_txrx_flow_ctrl_v2.h"
57 #else
58 
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	/* TX flow control v2 disabled: no flow pools exist, nothing to dump */
}
64 #endif
65 #ifdef WIFI_MONITOR_SUPPORT
66 #include <dp_mon.h>
67 #endif
68 #include "dp_ipa.h"
69 #ifdef FEATURE_WDS
70 #include "dp_txrx_wds.h"
71 #endif
72 #ifdef WLAN_SUPPORT_MSCS
73 #include "dp_mscs.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MESH_LATENCY
76 #include "dp_mesh_latency.h"
77 #endif
78 #ifdef ATH_SUPPORT_IQUE
79 #include "dp_txrx_me.h"
80 #endif
81 #if defined(DP_CON_MON)
82 #ifndef REMOVE_PKT_LOG
83 #include <pktlog_ac_api.h>
84 #include <pktlog_ac.h>
85 #endif
86 #endif
87 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
88 #include <dp_swlm.h>
89 #endif
90 
91 #ifdef WLAN_FEATURE_STATS_EXT
92 #define INIT_RX_HW_STATS_LOCK(_soc) \
93 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
94 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
95 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
96 #else
97 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
98 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
99 #endif
100 
101 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
102 #define SET_PEER_REF_CNT_ONE(_peer) \
103 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
104 #else
105 #define SET_PEER_REF_CNT_ONE(_peer)
106 #endif
107 
108 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
109 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
110 
111 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
112 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
113 
114 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
115 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
116 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
117 #define dp_init_info(params...) \
118 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
119 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
120 
121 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
122 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
123 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
124 #define dp_vdev_info(params...) \
125 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
126 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
127 
128 void dp_configure_arch_ops(struct dp_soc *soc);
129 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
130 
131 /*
132  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
133  * If the buffer size is exceeding this size limit,
134  * dp_txrx_get_peer_stats is to be used instead.
135  */
136 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
137 			(sizeof(cdp_peer_stats_param_t) <= 16));
138 
139 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
140 /*
141  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
142  * also should be updated accordingly
143  */
144 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
145 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
146 
147 /*
148  * HIF_EVENT_HIST_MAX should always be power of 2
149  */
150 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
151 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
152 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
153 
154 /*
155  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
156  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
157  */
158 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
159 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
160 			WLAN_CFG_INT_NUM_CONTEXTS);
161 
162 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
163 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
164 static void dp_pdev_srng_free(struct dp_pdev *pdev);
165 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
166 
167 static void dp_soc_srng_deinit(struct dp_soc *soc);
168 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
169 static void dp_soc_srng_free(struct dp_soc *soc);
170 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
171 
172 static void dp_soc_cfg_init(struct dp_soc *soc);
173 static void dp_soc_cfg_attach(struct dp_soc *soc);
174 
175 static inline
176 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
177 				HTC_HANDLE htc_handle,
178 				qdf_device_t qdf_osdev,
179 				uint8_t pdev_id);
180 
181 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
182 
183 static QDF_STATUS
184 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
185 		   HTC_HANDLE htc_handle,
186 		   qdf_device_t qdf_osdev,
187 		   uint8_t pdev_id);
188 
189 static QDF_STATUS
190 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
191 
192 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
193 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
194 
195 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
196 		  struct hif_opaque_softc *hif_handle);
197 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
198 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
199 				       uint8_t pdev_id,
200 				       int force);
201 static struct dp_soc *
202 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
203 	      struct hif_opaque_softc *hif_handle,
204 	      HTC_HANDLE htc_handle,
205 	      qdf_device_t qdf_osdev,
206 	      struct ol_if_ops *ol_ops, uint16_t device_id);
207 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
208 					      uint8_t vdev_id,
209 					      uint8_t *peer_mac_addr);
210 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
211 				       uint8_t vdev_id,
212 				       uint8_t *peer_mac, uint32_t bitmap);
213 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
214 				bool unmap_only);
215 #ifdef ENABLE_VERBOSE_DEBUG
216 bool is_dp_verbose_debug_enabled;
217 #endif
218 
219 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
220 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
221 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
222 			   bool enable);
223 static inline void
224 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
225 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
226 static inline void
227 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
228 #endif
229 
230 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
231 						uint8_t index);
232 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
233 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
234 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
235 						 uint8_t index);
236 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
237 					    enum hal_ring_type ring_type,
238 					    int ring_num);
239 
240 #define DP_INTR_POLL_TIMER_MS	5
241 
242 #define MON_VDEV_TIMER_INIT 0x1
243 #define MON_VDEV_TIMER_RUNNING 0x2
244 
245 /* Generic AST entry aging timer value */
246 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
247 #define DP_MCS_LENGTH (6*MAX_MCS)
248 
249 #define DP_CURR_FW_STATS_AVAIL 19
250 #define DP_HTT_DBG_EXT_STATS_MAX 256
251 #define DP_MAX_SLEEP_TIME 100
252 #ifndef QCA_WIFI_3_0_EMU
253 #define SUSPEND_DRAIN_WAIT 500
254 #else
255 #define SUSPEND_DRAIN_WAIT 3000
256 #endif
257 
258 #ifdef IPA_OFFLOAD
259 /* Exclude IPA rings from the interrupt context */
260 #define TX_RING_MASK_VAL	0xb
261 #define RX_RING_MASK_VAL	0x7
262 #else
263 #define TX_RING_MASK_VAL	0xF
264 #define RX_RING_MASK_VAL	0xF
265 #endif
266 
267 #define STR_MAXLEN	64
268 
269 #define RNG_ERR		"SRNG setup failed for"
270 
271 /* Threshold for peer's cached buf queue beyond which frames are dropped */
272 #define DP_RX_CACHED_BUFQ_THRESH 64
273 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Each run of 8 consecutive DSCP values shares one TID, i.e. the TID is
 * effectively DSCP >> 3 (only the 3 most-significant DSCP bits matter).
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
297 
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity map: the 3-bit 802.1p PCP value is used directly as the TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
314 
/**
 * dp_cpu_ring_map - CPU to TX ring map
 *
 * Indexed as [NSS ring-map id][interrupt context]; each entry is the TX
 * ring number used by that context for the given NSS offload
 * configuration (presumably a TCL data ring index — confirm against the
 * TX path users of this table).  The extra row is only compiled in when
 * WLAN_TX_PKT_CAPTURE_ENH is enabled.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
331 
/**
 * enum dp_stats_type - selects the column of dp_stats_mapping_table
 * @STATS_FW: firmware (HTT) statistics column
 * @STATS_HOST: host statistics column
 * @STATS_TYPE_MAX: number of columns
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: marker meaning "no FW statistic for this row"
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
348 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row maps one stats request index to a pair
 * {FW (HTT) stat id, host stat id}; TXRX_FW_STATS_INVALID /
 * TXRX_HOST_STATS_INVALID mark the side that does not apply.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
392 
393 /* MCL specific functions */
394 #if defined(DP_CON_MON)
395 /**
396  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
397  * @soc: pointer to dp_soc handle
398  * @intr_ctx_num: interrupt context number for which mon mask is needed
399  *
400  * For MCL, monitor mode rings are being processed in timer contexts (polled).
401  * This function is returning 0, since in interrupt mode(softirq based RX),
402  * we donot want to process monitor mode rings in a softirq.
403  *
404  * So, in case packet log is enabled for SAP/STA/P2P modes,
405  * regular interrupt processing will not process monitor mode rings. It would be
406  * done in a separate timer context.
407  *
408  * Return: 0
409  */
410 static inline
411 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
412 {
413 	return 0;
414 }
415 
416 /**
417  * dp_get_num_rx_contexts() - get number of RX contexts
418  * @soc_hdl: cdp opaque soc handle
419  *
420  * Return: number of RX contexts
421  */
422 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
423 {
424 	int i;
425 	int num_rx_contexts = 0;
426 
427 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
428 
429 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
430 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
431 			num_rx_contexts++;
432 
433 	return num_rx_contexts;
434 }
435 
436 #else
437 
438 /**
439  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
440  * @soc: pointer to dp_soc handle
441  * @intr_ctx_num: interrupt context number for which mon mask is needed
442  *
443  * Return: mon mask value
444  */
445 static inline
446 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
447 {
448 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
449 }
450 
451 /**
452  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
453  * @soc: pointer to dp_soc handle
454  *
455  * Return:
456  */
457 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
458 {
459 	int i;
460 
461 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
462 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
463 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
464 	}
465 }
466 
467 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
468 
/*
 * dp_service_lmac_rings() - timer handler to reap the lmac rings
 * @arg: opaque SoC handle (struct dp_soc *)
 *
 * For each LMAC with a pdev mapped: runs monitor-mode processing, drains
 * the RXDMA error rings of every interrupt context, and replenishes the
 * RX refill buffer ring unless RXDMA_BUF is offloaded to NSS.  Re-arms
 * itself at the end, so it keeps firing every DP_INTR_POLL_TIMER_MS
 * until the timer is stopped elsewhere.
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip lmac ids that have no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* Reap RXDMA error rings across all interrupt contexts */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Replenish only when RXDMA_BUF is not NSS-offloaded */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	/* Re-arm the reap timer for the next poll interval */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
514 
515 #endif
516 
517 #ifdef FEATURE_MEC
518 void dp_peer_mec_flush_entries(struct dp_soc *soc)
519 {
520 	unsigned int index;
521 	struct dp_mec_entry *mecentry, *mecentry_next;
522 
523 	TAILQ_HEAD(, dp_mec_entry) free_list;
524 	TAILQ_INIT(&free_list);
525 
526 	if (!soc->mec_hash.mask)
527 		return;
528 
529 	if (!soc->mec_hash.bins)
530 		return;
531 
532 	if (!qdf_atomic_read(&soc->mec_cnt))
533 		return;
534 
535 	qdf_spin_lock_bh(&soc->mec_lock);
536 	for (index = 0; index <= soc->mec_hash.mask; index++) {
537 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
538 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
539 					   hash_list_elem, mecentry_next) {
540 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
541 			}
542 		}
543 	}
544 	qdf_spin_unlock_bh(&soc->mec_lock);
545 
546 	dp_peer_mec_free_list(soc, &free_list);
547 }
548 
549 /**
550  * dp_print_mec_entries() - Dump MEC entries in table
551  * @soc: Datapath soc handle
552  *
553  * Return: none
554  */
555 static void dp_print_mec_stats(struct dp_soc *soc)
556 {
557 	int i;
558 	uint32_t index;
559 	struct dp_mec_entry *mecentry = NULL, *mec_list;
560 	uint32_t num_entries = 0;
561 
562 	DP_PRINT_STATS("MEC Stats:");
563 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
564 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
565 
566 	if (!qdf_atomic_read(&soc->mec_cnt))
567 		return;
568 
569 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
570 	if (!mec_list) {
571 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
572 		return;
573 	}
574 
575 	DP_PRINT_STATS("MEC Table:");
576 	for (index = 0; index <= soc->mec_hash.mask; index++) {
577 		qdf_spin_lock_bh(&soc->mec_lock);
578 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
579 			qdf_spin_unlock_bh(&soc->mec_lock);
580 			continue;
581 		}
582 
583 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
584 			      hash_list_elem) {
585 			qdf_mem_copy(&mec_list[num_entries], mecentry,
586 				     sizeof(*mecentry));
587 			num_entries++;
588 		}
589 		qdf_spin_unlock_bh(&soc->mec_lock);
590 	}
591 
592 	if (!num_entries) {
593 		qdf_mem_free(mec_list);
594 		return;
595 	}
596 
597 	for (i = 0; i < num_entries; i++) {
598 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
599 			       " is_active = %d pdev_id = %d vdev_id = %d",
600 			       i,
601 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
602 			       mec_list[i].is_active,
603 			       mec_list[i].pdev_id,
604 			       mec_list[i].vdev_id);
605 	}
606 	qdf_mem_free(mec_list);
607 }
608 #else
/* FEATURE_MEC disabled: MEC stats are not collected, nothing to print */
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
612 #endif
613 
614 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
615 				 uint8_t vdev_id,
616 				 uint8_t *peer_mac,
617 				 uint8_t *mac_addr,
618 				 enum cdp_txrx_ast_entry_type type,
619 				 uint32_t flags)
620 {
621 	int ret = -1;
622 	QDF_STATUS status = QDF_STATUS_SUCCESS;
623 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
624 						       peer_mac, 0, vdev_id,
625 						       DP_MOD_ID_CDP);
626 
627 	if (!peer) {
628 		dp_peer_debug("Peer is NULL!");
629 		return ret;
630 	}
631 
632 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
633 				 peer,
634 				 mac_addr,
635 				 type,
636 				 flags);
637 	if ((status == QDF_STATUS_SUCCESS) ||
638 	    (status == QDF_STATUS_E_ALREADY) ||
639 	    (status == QDF_STATUS_E_AGAIN))
640 		ret = 0;
641 
642 	dp_hmwds_ast_add_notify(peer, mac_addr,
643 				type, status, false);
644 
645 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
646 
647 	return ret;
648 }
649 
650 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
651 						uint8_t vdev_id,
652 						uint8_t *peer_mac,
653 						uint8_t *wds_macaddr,
654 						uint32_t flags)
655 {
656 	int status = -1;
657 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
658 	struct dp_ast_entry  *ast_entry = NULL;
659 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
660 						       peer_mac, 0, vdev_id,
661 						       DP_MOD_ID_CDP);
662 
663 	if (!peer) {
664 		dp_peer_debug("Peer is NULL!");
665 		return status;
666 	}
667 
668 	qdf_spin_lock_bh(&soc->ast_lock);
669 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
670 						    peer->vdev->pdev->pdev_id);
671 
672 	if (ast_entry) {
673 		status = dp_peer_update_ast(soc,
674 					    peer,
675 					    ast_entry, flags);
676 	}
677 	qdf_spin_unlock_bh(&soc->ast_lock);
678 
679 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
680 
681 	return status;
682 }
683 
684 /*
685  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
686  * @soc_handle:		Datapath SOC handle
687  * @peer:		DP peer
688  * @arg:		callback argument
689  *
690  * Return: None
691  */
692 static void
693 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
694 {
695 	struct dp_ast_entry *ast_entry = NULL;
696 	struct dp_ast_entry *tmp_ast_entry;
697 
698 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
699 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
700 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
701 			dp_peer_del_ast(soc, ast_entry);
702 	}
703 }
704 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries for a peer or MAC
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address (consulted only when
 *			peer_mac_addr is NULL)
 * @peer_mac_addr:	peer MAC address; when set, all HMWDS entries of
 *			this peer are deleted
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		/* Peer given: drop every HMWDS entry owned by that peer */
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		/* Otherwise look up the single entry by MAC and pdev */
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
759 
760 /*
761  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
762  * @soc:		Datapath SOC handle
763  * @vdev_id:		id of vdev object
764  *
765  * Return: QDF_STATUS
766  */
767 static QDF_STATUS
768 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
769 			     uint8_t vdev_id)
770 {
771 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
772 
773 	qdf_spin_lock_bh(&soc->ast_lock);
774 
775 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
776 			    DP_MOD_ID_CDP);
777 	qdf_spin_unlock_bh(&soc->ast_lock);
778 
779 	return QDF_STATUS_SUCCESS;
780 }
781 
782 /*
783  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
784  * @soc:		Datapath SOC
785  * @peer:		Datapath peer
786  * @arg:		arg to callback
787  *
788  * Return: None
789  */
790 static void
791 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
792 {
793 	struct dp_ast_entry *ase = NULL;
794 	struct dp_ast_entry *temp_ase;
795 
796 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
797 		if ((ase->type ==
798 			CDP_TXRX_AST_TYPE_STATIC) ||
799 			(ase->type ==
800 			 CDP_TXRX_AST_TYPE_SELF) ||
801 			(ase->type ==
802 			 CDP_TXRX_AST_TYPE_STA_BSS))
803 			continue;
804 		dp_peer_del_ast(soc, ase);
805 	}
806 }
807 
808 /*
809  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
810  * @soc:		Datapath SOC handle
811  *
812  * Return: None
813  */
814 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
815 {
816 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
817 
818 	qdf_spin_lock_bh(&soc->ast_lock);
819 
820 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
821 			    DP_MOD_ID_CDP);
822 
823 	qdf_spin_unlock_bh(&soc->ast_lock);
824 	dp_peer_mec_flush_entries(soc);
825 }
826 
827 /**
828  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
829  *                                       and return ast entry information
830  *                                       of first ast entry found in the
831  *                                       table with given mac address
832  *
833  * @soc : data path soc handle
834  * @ast_mac_addr : AST entry mac address
835  * @ast_entry_info : ast entry information
836  *
837  * return : true if ast entry found with ast_mac_addr
838  *          false if ast entry not found
839  */
840 static bool dp_peer_get_ast_info_by_soc_wifi3
841 	(struct cdp_soc_t *soc_hdl,
842 	 uint8_t *ast_mac_addr,
843 	 struct cdp_ast_entry_info *ast_entry_info)
844 {
845 	struct dp_ast_entry *ast_entry = NULL;
846 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
847 	struct dp_peer *peer = NULL;
848 
849 	qdf_spin_lock_bh(&soc->ast_lock);
850 
851 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
852 	if ((!ast_entry) ||
853 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
854 		qdf_spin_unlock_bh(&soc->ast_lock);
855 		return false;
856 	}
857 
858 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
859 				     DP_MOD_ID_AST);
860 	if (!peer) {
861 		qdf_spin_unlock_bh(&soc->ast_lock);
862 		return false;
863 	}
864 
865 	ast_entry_info->type = ast_entry->type;
866 	ast_entry_info->pdev_id = ast_entry->pdev_id;
867 	ast_entry_info->vdev_id = ast_entry->vdev_id;
868 	ast_entry_info->peer_id = ast_entry->peer_id;
869 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
870 		     &peer->mac_addr.raw[0],
871 		     QDF_MAC_ADDR_SIZE);
872 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
873 	qdf_spin_unlock_bh(&soc->ast_lock);
874 	return true;
875 }
876 
877 /**
878  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
879  *                                          and return ast entry information
880  *                                          if mac address and pdev_id matches
881  *
882  * @soc : data path soc handle
883  * @ast_mac_addr : AST entry mac address
884  * @pdev_id : pdev_id
885  * @ast_entry_info : ast entry information
886  *
887  * return : true if ast entry found with ast_mac_addr
888  *          false if ast entry not found
889  */
890 static bool dp_peer_get_ast_info_by_pdevid_wifi3
891 		(struct cdp_soc_t *soc_hdl,
892 		 uint8_t *ast_mac_addr,
893 		 uint8_t pdev_id,
894 		 struct cdp_ast_entry_info *ast_entry_info)
895 {
896 	struct dp_ast_entry *ast_entry;
897 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
898 	struct dp_peer *peer = NULL;
899 
900 	qdf_spin_lock_bh(&soc->ast_lock);
901 
902 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
903 						    pdev_id);
904 
905 	if ((!ast_entry) ||
906 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
907 		qdf_spin_unlock_bh(&soc->ast_lock);
908 		return false;
909 	}
910 
911 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
912 				     DP_MOD_ID_AST);
913 	if (!peer) {
914 		qdf_spin_unlock_bh(&soc->ast_lock);
915 		return false;
916 	}
917 
918 	ast_entry_info->type = ast_entry->type;
919 	ast_entry_info->pdev_id = ast_entry->pdev_id;
920 	ast_entry_info->vdev_id = ast_entry->vdev_id;
921 	ast_entry_info->peer_id = ast_entry->peer_id;
922 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
923 		     &peer->mac_addr.raw[0],
924 		     QDF_MAC_ADDR_SIZE);
925 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
926 	qdf_spin_unlock_bh(&soc->ast_lock);
927 	return true;
928 }
929 
930 /**
931  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
932  *                            with given mac address
933  *
934  * @soc : data path soc handle
935  * @ast_mac_addr : AST entry mac address
936  * @callback : callback function to called on ast delete response from FW
937  * @cookie : argument to be passed to callback
938  *
939  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
940  *          is sent
941  *          QDF_STATUS_E_INVAL false if ast entry not found
942  */
943 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
944 					       uint8_t *mac_addr,
945 					       txrx_ast_free_cb callback,
946 					       void *cookie)
947 
948 {
949 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
950 	struct dp_ast_entry *ast_entry = NULL;
951 	txrx_ast_free_cb cb = NULL;
952 	void *arg = NULL;
953 
954 	qdf_spin_lock_bh(&soc->ast_lock);
955 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
956 	if (!ast_entry) {
957 		qdf_spin_unlock_bh(&soc->ast_lock);
958 		return -QDF_STATUS_E_INVAL;
959 	}
960 
961 	if (ast_entry->callback) {
962 		cb = ast_entry->callback;
963 		arg = ast_entry->cookie;
964 	}
965 
966 	ast_entry->callback = callback;
967 	ast_entry->cookie = cookie;
968 
969 	/*
970 	 * if delete_in_progress is set AST delete is sent to target
971 	 * and host is waiting for response should not send delete
972 	 * again
973 	 */
974 	if (!ast_entry->delete_in_progress)
975 		dp_peer_del_ast(soc, ast_entry);
976 
977 	qdf_spin_unlock_bh(&soc->ast_lock);
978 	if (cb) {
979 		cb(soc->ctrl_psoc,
980 		   dp_soc_to_cdp_soc(soc),
981 		   arg,
982 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
983 	}
984 	return QDF_STATUS_SUCCESS;
985 }
986 
987 /**
988  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
989  *                                   table if mac address and pdev_id matches
990  *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to be called on AST delete response from FW
 * @cookie : argument to be passed to the callback
 *
 * return : QDF_STATUS_SUCCESS if an AST entry matching mac_addr and pdev_id
 *          is found and the delete request is sent
 *          QDF_STATUS_E_INVAL if no matching AST entry is found
1000  */
1001 
1002 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1003 						uint8_t *mac_addr,
1004 						uint8_t pdev_id,
1005 						txrx_ast_free_cb callback,
1006 						void *cookie)
1007 
1008 {
1009 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1010 	struct dp_ast_entry *ast_entry;
1011 	txrx_ast_free_cb cb = NULL;
1012 	void *arg = NULL;
1013 
1014 	qdf_spin_lock_bh(&soc->ast_lock);
1015 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1016 
1017 	if (!ast_entry) {
1018 		qdf_spin_unlock_bh(&soc->ast_lock);
1019 		return -QDF_STATUS_E_INVAL;
1020 	}
1021 
1022 	if (ast_entry->callback) {
1023 		cb = ast_entry->callback;
1024 		arg = ast_entry->cookie;
1025 	}
1026 
1027 	ast_entry->callback = callback;
1028 	ast_entry->cookie = cookie;
1029 
1030 	/*
1031 	 * if delete_in_progress is set AST delete is sent to target
1032 	 * and host is waiting for response should not sent delete
1033 	 * again
1034 	 */
1035 	if (!ast_entry->delete_in_progress)
1036 		dp_peer_del_ast(soc, ast_entry);
1037 
1038 	qdf_spin_unlock_bh(&soc->ast_lock);
1039 
1040 	if (cb) {
1041 		cb(soc->ctrl_psoc,
1042 		   dp_soc_to_cdp_soc(soc),
1043 		   arg,
1044 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1045 	}
1046 	return QDF_STATUS_SUCCESS;
1047 }
1048 
1049 /**
1050  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1051  * @ring_num: ring num of the ring being queried
1052  * @grp_mask: the grp_mask array for the ring type in question.
1053  *
1054  * The grp_mask array is indexed by group number and the bit fields correspond
1055  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1056  *
1057  * Return: the index in the grp_mask array with the ring number.
1058  * -QDF_STATUS_E_NOENT if no entry is found
1059  */
1060 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1061 {
1062 	int ext_group_num;
1063 	uint8_t mask = 1 << ring_num;
1064 
1065 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1066 	     ext_group_num++) {
1067 		if (mask & grp_mask[ext_group_num])
1068 			return ext_group_num;
1069 	}
1070 
1071 	return -QDF_STATUS_E_NOENT;
1072 }
1073 
1074 /**
1075  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1076  * @msi_group_number: MSI group number.
1077  * @msi_data_count: MSI data count.
1078  *
1079  * Return: true if msi_group_number is invalid.
1080  */
1081 #ifdef WLAN_ONE_MSI_VECTOR
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	/* With a single shared MSI vector every group number is valid */
	return false;
}
1087 #else
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	/* A group number beyond the number of available MSI data vectors
	 * cannot get a dedicated vector of its own.
	 */
	return msi_data_count < msi_group_number;
}
1093 #endif
1094 
1095 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1096 /**
1097  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1098  *				rx_near_full_grp1 mask
1099  * @soc: Datapath SoC Handle
1100  * @ring_num: REO ring number
1101  *
1102  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1103  *	   0, otherwise.
1104  */
1105 static inline int
1106 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1107 {
1108 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1109 }
1110 
1111 /**
1112  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1113  *				rx_near_full_grp2 mask
1114  * @soc: Datapath SoC Handle
1115  * @ring_num: REO ring number
1116  *
1117  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1118  *	   0, otherwise.
1119  */
1120 static inline int
1121 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1122 {
1123 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1124 }
1125 
1126 /**
1127  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1128  *				ring type and number
1129  * @soc: Datapath SoC handle
1130  * @ring_type: SRNG type
1131  * @ring_num: ring num
1132  *
1133  * Return: near ful irq mask pointer
1134  */
1135 static inline
1136 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1137 					enum hal_ring_type ring_type,
1138 					int ring_num)
1139 {
1140 	uint8_t *nf_irq_mask = NULL;
1141 
1142 	switch (ring_type) {
1143 	case WBM2SW_RELEASE:
1144 		if (ring_num != WBM2SW_REL_ERR_RING_NUM) {
1145 			nf_irq_mask = &soc->wlan_cfg_ctx->
1146 					int_tx_ring_near_full_irq_mask[0];
1147 		}
1148 		break;
1149 	case REO_DST:
1150 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1151 			nf_irq_mask =
1152 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1153 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1154 			nf_irq_mask =
1155 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1156 		else
1157 			qdf_assert(0);
1158 		break;
1159 	default:
1160 		break;
1161 	}
1162 
1163 	return nf_irq_mask;
1164 }
1165 
1166 /**
1167  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1168  * @soc: Datapath SoC handle
1169  * @ring_params: srng params handle
1170  * @msi2_addr: MSI2 addr to be set for the SRNG
1171  * @msi2_data: MSI2 data to be set for the SRNG
1172  *
1173  * Return: None
1174  */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	/* Program the near-full (MSI2) vector address/data for the SRNG;
	 * callers pass 0/0 to disable the near-full interrupt.
	 */
	ring_params->msi2_addr = msi2_addr;
	ring_params->msi2_data = msi2_data;
}
1184 
1185 /**
1186  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1187  * @soc: Datapath SoC handle
1188  * @ring_params: ring_params for SRNG
1189  * @ring_type: SENG type
1190  * @ring_num: ring number for the SRNG
1191  * @nf_msi_grp_num: near full msi group number
1192  *
1193  * Return: None
1194  */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	/* Nothing to program when platform MSI assignment is unavailable */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	/* Negative group number: ring has no near-full ext_group, so keep
	 * the MSI2 vector disabled for it.
	 */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Flag an over-subscribed group, but still program the (shared)
	 * vector below via the modulo mapping.
	 */
	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	/* Compose the 64-bit MSI address from its low/high halves */
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
1232 
1233 /* Percentage of ring entries considered as nearly full */
1234 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1235 /* Percentage of ring entries considered as critically full */
1236 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1237 /* Percentage of ring entries considered as safe threshold */
1238 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1239 
1240 /**
1241  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1242  *			near full irq
1243  * @soc: Datapath SoC handle
1244  * @ring_params: ring params for SRNG
1245  * @ring_type: ring type
1246  */
1247 static inline void
1248 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1249 					  struct hal_srng_params *ring_params,
1250 					  int ring_type)
1251 {
1252 	if (ring_params->nf_irq_support) {
1253 		ring_params->high_thresh = (ring_params->num_entries *
1254 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1255 		ring_params->crit_thresh = (ring_params->num_entries *
1256 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1257 		ring_params->safe_thresh = (ring_params->num_entries *
1258 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1259 	}
1260 }
1261 
1262 /**
1263  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1264  *			structure from the ring params
1265  * @soc: Datapath SoC handle
1266  * @srng: SRNG handle
1267  * @ring_params: ring params for a SRNG
1268  *
1269  * Return: None
1270  */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	/* Cache the critical/safe near-full thresholds on the dp_srng so
	 * the datapath can consult them without the hal params.
	 */
	srng->crit_thresh = ring_params->crit_thresh;
	srng->safe_thresh = ring_params->safe_thresh;
}
1278 
1279 #else
/*
 * WLAN_FEATURE_NEAR_FULL_IRQ disabled: near-full interrupt handling is
 * compiled out, so the helpers below are no-op stubs.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	/* No ring has a near-full IRQ mask when the feature is disabled */
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
1315 #endif
1316 
/**
 * dp_srng_calculate_msi_group() - Map a ring to its regular (and optionally
 *			near-full) interrupt ext_group numbers
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 * @reg_msi_grp_num: out param for the regular interrupt group number
 * @nf_irq_support: true if the SRNG supports near-full interrupts
 * @nf_msi_grp_num: out param for the near-full interrupt group number; only
 *		written when near-full IRQ is supported and enabled for the ring
 *
 * Return: QDF_STATUS_SUCCESS if the ring is serviced by an ext_group,
 *	   -QDF_STATUS_E_NOENT for ring types with no interrupt group
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		if (ring_num == WBM2SW_REL_ERR_RING_NUM) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			/* rx_rel_ring occupies bit 0 of its mask */
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	/* Translate the per-group masks into concrete group numbers */
	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1417 
1418 /*
1419  * dp_get_num_msi_available()- API to get number of MSIs available
1420  * @dp_soc: DP soc Handle
1421  * @interrupt_mode: Mode of interrupts
1422  *
1423  * Return: Number of MSIs available or 0 in case of integrated
1424  */
1425 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	/* Integrated targets (WLAN_MAX_PDEVS == 1 builds) do not use MSI */
	return 0;
}
1430 #else
1431 /*
1432  * dp_get_num_msi_available()- API to get number of MSIs available
1433  * @dp_soc: DP soc Handle
1434  * @interrupt_mode: Mode of interrupts
1435  *
1436  * Return: Number of MSIs available or 0 in case of integrated
1437  */
1438 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1439 {
1440 	int msi_data_count;
1441 	int msi_data_start;
1442 	int msi_irq_start;
1443 	int ret;
1444 
1445 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1446 		return 0;
1447 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1448 		   DP_INTR_POLL) {
1449 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1450 						  &msi_data_count,
1451 						  &msi_data_start,
1452 						  &msi_irq_start);
1453 		if (ret) {
1454 			qdf_err("Unable to get DP MSI assignment %d",
1455 				interrupt_mode);
1456 			return -EINVAL;
1457 		}
1458 		return msi_data_count;
1459 	}
1460 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1461 	return -EINVAL;
1462 }
1463 #endif
1464 
/**
 * dp_srng_msi_setup() - Program the MSI (and, when supported, near-full
 *			MSI2) vector for an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring params to be filled in
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 *
 * Leaves msi_addr/msi_data zeroed when the ring is not serviced through an
 * interrupt ext_group or when the platform has no MSI assignment for DP.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	/* No platform MSI assignment: leave the ring in non-MSI mode */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type has no interrupt group at all: disable MSI and MSI2 */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* No regular group for this ring, but near-full may still apply */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	/* Flag an over-subscribed group; the modulo mapping below shares
	 * a vector between groups in that case.
	 */
	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* Compose the 64-bit MSI address from its low/high halves */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1537 
1538 #ifdef FEATURE_AST
1539 /**
1540  * dp_print_peer_ast_entries() - Dump AST entries of peer
1541  * @soc: Datapath soc handle
1542  * @peer: Datapath peer
1543  * @arg: argument to iterate function
1544  *
1545  * return void
1546  */
static void
dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *tmp_ase;
	uint32_t num_entries = 0;
	/* Printable labels indexed by ase->type (cdp_txrx_ast_entry_type);
	 * rows without an initializer are zero-filled and print as "".
	 */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	/* Walk the peer's AST list; caller is expected to hold ast_lock
	 * (see dp_print_ast_stats) — TODO confirm for other call sites.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
		    " peer_id = %u"
		    " type = %s"
		    " next_hop = %d"
		    " is_active = %d"
		    " ast_idx = %d"
		    " ast_hash = %d"
		    " delete_in_progress = %d"
		    " pdev_id = %d"
		    " vdev_id = %d",
		    ++num_entries,
		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		    ase->peer_id,
		    type[ase->type],
		    ase->next_hop,
		    ase->is_active,
		    ase->ast_idx,
		    ase->ast_hash_value,
		    ase->delete_in_progress,
		    ase->pdev_id,
		    ase->vdev_id);
	}
}
1582 
1583 /**
1584  * dp_print_ast_stats() - Dump AST table contents
1585  * @soc: Datapath soc handle
1586  *
1587  * return void
1588  */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
		       soc->stats.ast.ast_mismatch);

	DP_PRINT_STATS("AST Table:");

	/* Hold ast_lock across the iteration so entries cannot be freed
	 * while dp_print_peer_ast_entries() walks them.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
			    DP_MOD_ID_GENERIC_STATS);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
1608 #else
void dp_print_ast_stats(struct dp_soc *soc)
{
	/* AST statistics are only tracked when FEATURE_AST is enabled.
	 * Fix: add the missing space after the period in the message and
	 * drop the redundant trailing return from this void function.
	 */
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
1614 #endif
1615 
1616 /**
1617  * dp_print_peer_info() - Dump peer info
1618  * @soc: Datapath soc handle
1619  * @peer: Datapath peer handle
1620  * @arg: argument to iter function
1621  *
1622  * return void
1623  */
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	/* One-line summary of the peer's capability flags and id; used as
	 * the iterator callback for dp_print_peer_table().
	 */
	DP_PRINT_STATS("    peer_mac_addr = "QDF_MAC_ADDR_FMT
		       " nawds_enabled = %d"
		       " bss_peer = %d"
		       " wds_enabled = %d"
		       " tx_cap_enabled = %d"
		       " rx_cap_enabled = %d"
		       " peer id = %d",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		       peer->nawds_enabled,
		       peer->bss_peer,
		       peer->wds_enabled,
		       peer->tx_cap_enabled,
		       peer->rx_cap_enabled,
		       peer->peer_id);
}
1642 
1643 /**
1644  * dp_print_peer_table() - Dump all Peer stats
1645  * @vdev: Datapath Vdev handle
1646  *
1647  * return void
1648  */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	DP_PRINT_STATS("Dumping Peer Table  Stats:");
	/* Print one summary line per peer on this vdev */
	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
			     DP_MOD_ID_GENERIC_STATS);
}
1655 
1656 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1657 /**
1658  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1659  * threshold values from the wlan_srng_cfg table for each ring type
1660  * @soc: device handle
1661  * @ring_params: per ring specific parameters
1662  * @ring_type: Ring type
1663  * @ring_num: Ring number for a given ring type
1664  *
1665  * Fill the ring params with the interrupt threshold
1666  * configuration parameters available in the per ring type wlan_srng_cfg
1667  * table.
1668  *
1669  * Return: None
1670  */
1671 static void
1672 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1673 				       struct hal_srng_params *ring_params,
1674 				       int ring_type, int ring_num,
1675 				       int num_entries)
1676 {
1677 	if (ring_type == REO_DST) {
1678 		ring_params->intr_timer_thres_us =
1679 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1680 		ring_params->intr_batch_cntr_thres_entries =
1681 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1682 	} else if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1683 		ring_params->intr_timer_thres_us =
1684 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1685 		ring_params->intr_batch_cntr_thres_entries =
1686 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1687 	} else {
1688 		ring_params->intr_timer_thres_us =
1689 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1690 		ring_params->intr_batch_cntr_thres_entries =
1691 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1692 	}
1693 	ring_params->low_threshold =
1694 			soc->wlan_srng_cfg[ring_type].low_threshold;
1695 	if (ring_params->low_threshold)
1696 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1697 
1698 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1699 }
1700 #else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
1757 #endif
1758 
1759 #ifdef DP_MEM_PRE_ALLOC
1760 
1761 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1762 			   size_t ctxt_size)
1763 {
1764 	void *ctxt_mem;
1765 
1766 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1767 		dp_warn("dp_prealloc_get_context null!");
1768 		goto dynamic_alloc;
1769 	}
1770 
1771 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);
1772 
1773 	if (ctxt_mem)
1774 		goto end;
1775 
1776 dynamic_alloc:
1777 	dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
1778 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1779 end:
1780 	return ctxt_mem;
1781 }
1782 
1783 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1784 			 void *vaddr)
1785 {
1786 	QDF_STATUS status;
1787 
1788 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1789 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1790 								ctxt_type,
1791 								vaddr);
1792 	} else {
1793 		dp_warn("dp_prealloc_get_context null!");
1794 		status = QDF_STATUS_E_NOSUPPORT;
1795 	}
1796 
1797 	if (QDF_IS_STATUS_ERROR(status)) {
1798 		dp_info("Context not pre-allocated");
1799 		qdf_mem_free(vaddr);
1800 	}
1801 }
1802 
1803 static inline
1804 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1805 					   struct dp_srng *srng,
1806 					   uint32_t ring_type)
1807 {
1808 	void *mem;
1809 
1810 	qdf_assert(!srng->is_mem_prealloc);
1811 
1812 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
1813 		dp_warn("dp_prealloc_get_consistent is null!");
1814 		goto qdf;
1815 	}
1816 
1817 	mem =
1818 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
1819 						(&srng->alloc_size,
1820 						 &srng->base_vaddr_unaligned,
1821 						 &srng->base_paddr_unaligned,
1822 						 &srng->base_paddr_aligned,
1823 						 DP_RING_BASE_ALIGN, ring_type);
1824 
1825 	if (mem) {
1826 		srng->is_mem_prealloc = true;
1827 		goto end;
1828 	}
1829 qdf:
1830 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1831 						&srng->base_vaddr_unaligned,
1832 						&srng->base_paddr_unaligned,
1833 						&srng->base_paddr_aligned,
1834 						DP_RING_BASE_ALIGN);
1835 end:
1836 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
1837 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
1838 		srng, ring_type, srng->alloc_size, srng->num_entries);
1839 	return mem;
1840 }
1841 
1842 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1843 					       struct dp_srng *srng)
1844 {
1845 	if (srng->is_mem_prealloc) {
1846 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
1847 			dp_warn("dp_prealloc_put_consistent is null!");
1848 			QDF_BUG(0);
1849 			return;
1850 		}
1851 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
1852 						(srng->alloc_size,
1853 						 srng->base_vaddr_unaligned,
1854 						 srng->base_paddr_unaligned);
1855 
1856 	} else {
1857 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1858 					srng->alloc_size,
1859 					srng->base_vaddr_unaligned,
1860 					srng->base_paddr_unaligned, 0);
1861 	}
1862 }
1863 
1864 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
1865 				   enum dp_desc_type desc_type,
1866 				   struct qdf_mem_multi_page_t *pages,
1867 				   size_t element_size,
1868 				   uint16_t element_num,
1869 				   qdf_dma_context_t memctxt,
1870 				   bool cacheable)
1871 {
1872 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
1873 		dp_warn("dp_get_multi_pages is null!");
1874 		goto qdf;
1875 	}
1876 
1877 	pages->num_pages = 0;
1878 	pages->is_mem_prealloc = 0;
1879 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
1880 						element_size,
1881 						element_num,
1882 						pages,
1883 						cacheable);
1884 	if (pages->num_pages)
1885 		goto end;
1886 
1887 qdf:
1888 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
1889 				  element_num, memctxt, cacheable);
1890 end:
1891 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
1892 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
1893 		desc_type, (int)element_size, element_num, cacheable);
1894 }
1895 
1896 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
1897 				  enum dp_desc_type desc_type,
1898 				  struct qdf_mem_multi_page_t *pages,
1899 				  qdf_dma_context_t memctxt,
1900 				  bool cacheable)
1901 {
1902 	if (pages->is_mem_prealloc) {
1903 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
1904 			dp_warn("dp_put_multi_pages is null!");
1905 			QDF_BUG(0);
1906 			return;
1907 		}
1908 
1909 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
1910 		qdf_mem_zero(pages, sizeof(*pages));
1911 	} else {
1912 		qdf_mem_multi_pages_free(soc->osdev, pages,
1913 					 memctxt, cacheable);
1914 	}
1915 }
1916 
1917 #else
1918 
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)

{
	/* DP_MEM_PRE_ALLOC disabled: always allocate DMA-coherent memory */
	return qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
						&srng->base_vaddr_unaligned,
						&srng->base_paddr_unaligned,
						&srng->base_paddr_aligned,
						DP_RING_BASE_ALIGN);
}
1931 
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	/* DP_MEM_PRE_ALLOC disabled: plain DMA-coherent free */
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
1940 
1941 #endif /* DP_MEM_PRE_ALLOC */
1942 
1943 /*
1944  * dp_srng_free() - Free SRNG memory
1945  * @soc  : Data path soc handle
1946  * @srng : SRNG pointer
1947  *
1948  * return: None
1949  */
1950 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1951 {
1952 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1953 		if (!srng->cached) {
1954 			dp_srng_mem_free_consistent(soc, srng);
1955 		} else {
1956 			qdf_mem_free(srng->base_vaddr_unaligned);
1957 		}
1958 		srng->alloc_size = 0;
1959 		srng->base_vaddr_unaligned = NULL;
1960 	}
1961 	srng->hal_srng = NULL;
1962 }
1963 
1964 qdf_export_symbol(dp_srng_free);
1965 
1966 #ifdef DISABLE_MON_RING_MSI_CFG
1967 /*
1968  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
1969  * @ring_type: sring type
1970  *
1971  * Return: True if msi cfg should be skipped for srng type else false
1972  */
1973 static inline bool dp_skip_msi_cfg(int ring_type)
1974 {
1975 	if (ring_type == RXDMA_MONITOR_STATUS)
1976 		return true;
1977 
1978 	return false;
1979 }
1980 #else
static inline bool dp_skip_msi_cfg(int ring_type)
{
	/* MSI configuration is never skipped when the monitor-ring
	 * workaround is not compiled in.
	 */
	return false;
}
1985 #endif
1986 
1987 /*
1988  * dp_srng_init() - Initialize SRNG
1989  * @soc  : Data path soc handle
1990  * @srng : SRNG pointer
1991  * @ring_type : Ring Type
1992  * @ring_num: Ring number
1993  * @mac_id: mac_id
1994  *
1995  * return: QDF_STATUS
1996  */
1997 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1998 			int ring_type, int ring_num, int mac_id)
1999 {
2000 	hal_soc_handle_t hal_soc = soc->hal_soc;
2001 	struct hal_srng_params ring_params;
2002 
2003 	if (srng->hal_srng) {
2004 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2005 			    soc, ring_type, ring_num);
2006 		return QDF_STATUS_SUCCESS;
2007 	}
2008 
2009 	/* memset the srng ring to zero */
2010 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2011 
2012 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2013 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2014 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2015 
2016 	ring_params.num_entries = srng->num_entries;
2017 
2018 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2019 		ring_type, ring_num,
2020 		(void *)ring_params.ring_base_vaddr,
2021 		(void *)ring_params.ring_base_paddr,
2022 		ring_params.num_entries);
2023 
2024 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(ring_type)) {
2025 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2026 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2027 				 ring_type, ring_num);
2028 	} else {
2029 		ring_params.msi_data = 0;
2030 		ring_params.msi_addr = 0;
2031 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2032 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2033 				 ring_type, ring_num);
2034 	}
2035 
2036 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2037 					       ring_type, ring_num,
2038 					       srng->num_entries);
2039 
2040 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2041 
2042 	if (srng->cached)
2043 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2044 
2045 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2046 					mac_id, &ring_params);
2047 
2048 	if (!srng->hal_srng) {
2049 		dp_srng_free(soc, srng);
2050 		return QDF_STATUS_E_FAILURE;
2051 	}
2052 
2053 	return QDF_STATUS_SUCCESS;
2054 }
2055 
2056 qdf_export_symbol(dp_srng_init);
2057 
2058 /*
2059  * dp_srng_alloc() - Allocate memory for SRNG
2060  * @soc  : Data path soc handle
2061  * @srng : SRNG pointer
2062  * @ring_type : Ring Type
2063  * @num_entries: Number of entries
2064  * @cached: cached flag variable
2065  *
2066  * return: QDF_STATUS
2067  */
2068 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2069 			 int ring_type, uint32_t num_entries,
2070 			 bool cached)
2071 {
2072 	hal_soc_handle_t hal_soc = soc->hal_soc;
2073 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2074 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2075 
2076 	if (srng->base_vaddr_unaligned) {
2077 		dp_init_err("%pK: Ring type: %d, is already allocated",
2078 			    soc, ring_type);
2079 		return QDF_STATUS_SUCCESS;
2080 	}
2081 
2082 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2083 	srng->hal_srng = NULL;
2084 	srng->alloc_size = num_entries * entry_size;
2085 	srng->num_entries = num_entries;
2086 	srng->cached = cached;
2087 
2088 	if (!cached) {
2089 		srng->base_vaddr_aligned =
2090 		    dp_srng_aligned_mem_alloc_consistent(soc,
2091 							 srng,
2092 							 ring_type);
2093 	} else {
2094 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2095 					&srng->alloc_size,
2096 					&srng->base_vaddr_unaligned,
2097 					&srng->base_paddr_unaligned,
2098 					&srng->base_paddr_aligned,
2099 					DP_RING_BASE_ALIGN);
2100 	}
2101 
2102 	if (!srng->base_vaddr_aligned)
2103 		return QDF_STATUS_E_NOMEM;
2104 
2105 	return QDF_STATUS_SUCCESS;
2106 }
2107 
2108 qdf_export_symbol(dp_srng_alloc);
2109 
2110 /*
2111  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2112  * @soc: DP SOC handle
2113  * @srng: source ring structure
2114  * @ring_type: type of ring
2115  * @ring_num: ring number
2116  *
2117  * Return: None
2118  */
2119 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2120 		    int ring_type, int ring_num)
2121 {
2122 	if (!srng->hal_srng) {
2123 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2124 			    soc, ring_type, ring_num);
2125 		return;
2126 	}
2127 
2128 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2129 	srng->hal_srng = NULL;
2130 }
2131 
2132 qdf_export_symbol(dp_srng_deinit);
2133 
2134 /* TODO: Need this interface from HIF */
2135 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2136 
2137 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2138 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2139 			 hal_ring_handle_t hal_ring_hdl)
2140 {
2141 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2142 	uint32_t hp, tp;
2143 	uint8_t ring_id;
2144 
2145 	if (!int_ctx)
2146 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2147 
2148 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2149 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2150 
2151 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2152 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2153 
2154 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2155 }
2156 
2157 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2158 			hal_ring_handle_t hal_ring_hdl)
2159 {
2160 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2161 	uint32_t hp, tp;
2162 	uint8_t ring_id;
2163 
2164 	if (!int_ctx)
2165 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2166 
2167 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2168 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2169 
2170 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2171 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2172 
2173 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2174 }
2175 
/* Record a timer bottom-half entry event against @hist_group_id in the
 * HIF event history (head/tail/ring fields are not applicable, hence 0).
 */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}

/* Record the matching timer bottom-half exit event for @hist_group_id */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
#else

/* Event history disabled: timer entry/exit recording compiles to no-ops */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}

static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}

#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2202 
2203 /*
2204  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2205  * @soc: DP soc handle
2206  * @work_done: work done in softirq context
2207  * @start_time: start time for the softirq
2208  *
2209  * Return: enum with yield code
2210  */
2211 enum timer_yield_status
2212 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2213 			  uint64_t start_time)
2214 {
2215 	uint64_t cur_time = qdf_get_log_timestamp();
2216 
2217 	if (!work_done)
2218 		return DP_TIMER_WORK_DONE;
2219 
2220 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2221 		return DP_TIMER_TIME_EXHAUST;
2222 
2223 	return DP_TIMER_NO_YIELD;
2224 }
2225 
2226 qdf_export_symbol(dp_should_timer_irq_yield);
2227 
2228 /**
2229  * dp_process_lmac_rings() - Process LMAC rings
2230  * @int_ctx: interrupt context
2231  * @total_budget: budget of work which can be done
2232  *
2233  * Return: work done
2234  */
2235 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2236 {
2237 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2238 	struct dp_soc *soc = int_ctx->soc;
2239 	uint32_t remaining_quota = total_budget;
2240 	struct dp_pdev *pdev = NULL;
2241 	uint32_t work_done  = 0;
2242 	int budget = total_budget;
2243 	int ring = 0;
2244 
2245 	/* Process LMAC interrupts */
2246 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2247 		int mac_for_pdev = ring;
2248 
2249 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2250 		if (!pdev)
2251 			continue;
2252 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2253 			work_done = dp_monitor_process(soc, int_ctx,
2254 						       mac_for_pdev,
2255 						       remaining_quota);
2256 			if (work_done)
2257 				intr_stats->num_rx_mon_ring_masks++;
2258 			budget -= work_done;
2259 			if (budget <= 0)
2260 				goto budget_done;
2261 			remaining_quota = budget;
2262 		}
2263 
2264 		if (int_ctx->rxdma2host_ring_mask &
2265 				(1 << mac_for_pdev)) {
2266 			work_done = dp_rxdma_err_process(int_ctx, soc,
2267 							 mac_for_pdev,
2268 							 remaining_quota);
2269 			if (work_done)
2270 				intr_stats->num_rxdma2host_ring_masks++;
2271 			budget -=  work_done;
2272 			if (budget <= 0)
2273 				goto budget_done;
2274 			remaining_quota = budget;
2275 		}
2276 
2277 		if (int_ctx->host2rxdma_ring_mask &
2278 					(1 << mac_for_pdev)) {
2279 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2280 			union dp_rx_desc_list_elem_t *tail = NULL;
2281 			struct dp_srng *rx_refill_buf_ring;
2282 
2283 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2284 				rx_refill_buf_ring =
2285 					&soc->rx_refill_buf_ring[mac_for_pdev];
2286 			else
2287 				rx_refill_buf_ring =
2288 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2289 
2290 			intr_stats->num_host2rxdma_ring_masks++;
2291 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
2292 				     1);
2293 			dp_rx_buffers_replenish(soc, mac_for_pdev,
2294 						rx_refill_buf_ring,
2295 						&soc->rx_desc_buf[mac_for_pdev],
2296 						0, &desc_list, &tail);
2297 		}
2298 	}
2299 
2300 budget_done:
2301 	return total_budget - budget;
2302 }
2303 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_service_near_full_srngs() - Bottom half handler to process the near
 *				full IRQ on a SRNG
 * @dp_ctx: Datapath SoC handle
 * @dp_budget: Number of SRNGs which can be processed in a single attempt
 *		without rescheduling
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *intr_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *dp_soc = intr_ctx->soc;

	/*
	 * The near-full arch op is expected to be initialized whenever
	 * the NEAR FULL IRQ feature is enabled.
	 */
	return dp_soc->arch_ops.dp_service_near_full_srngs(dp_soc, intr_ctx,
							   dp_budget);
}
#endif
2327 
2328 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2329 
2330 /*
2331  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2332  * @dp_ctx: DP SOC handle
2333  * @budget: Number of frames/descriptors that can be processed in one shot
2334  *
2335  * Return: remaining budget/quota for the soc device
2336  */
2337 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2338 {
2339 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2340 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2341 	struct dp_soc *soc = int_ctx->soc;
2342 	int ring = 0;
2343 	int index;
2344 	uint32_t work_done  = 0;
2345 	int budget = dp_budget;
2346 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2347 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2348 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2349 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2350 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2351 	uint32_t remaining_quota = dp_budget;
2352 
2353 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2354 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2355 			 reo_status_mask,
2356 			 int_ctx->rx_mon_ring_mask,
2357 			 int_ctx->host2rxdma_ring_mask,
2358 			 int_ctx->rxdma2host_ring_mask);
2359 
2360 	/* Process Tx completion interrupts first to return back buffers */
2361 	for (index = 0; index < soc->num_tcl_data_rings; index++) {
2362 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2363 			continue;
2364 		work_done = dp_tx_comp_handler(int_ctx,
2365 					       soc,
2366 					       soc->tx_comp_ring[index].hal_srng,
2367 					       index, remaining_quota);
2368 		if (work_done) {
2369 			intr_stats->num_tx_ring_masks[index]++;
2370 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2371 					 tx_mask, index, budget,
2372 					 work_done);
2373 		}
2374 		budget -= work_done;
2375 		if (budget <= 0)
2376 			goto budget_done;
2377 
2378 		remaining_quota = budget;
2379 	}
2380 
2381 	/* Process REO Exception ring interrupt */
2382 	if (rx_err_mask) {
2383 		work_done = dp_rx_err_process(int_ctx, soc,
2384 					      soc->reo_exception_ring.hal_srng,
2385 					      remaining_quota);
2386 
2387 		if (work_done) {
2388 			intr_stats->num_rx_err_ring_masks++;
2389 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2390 					 work_done, budget);
2391 		}
2392 
2393 		budget -=  work_done;
2394 		if (budget <= 0) {
2395 			goto budget_done;
2396 		}
2397 		remaining_quota = budget;
2398 	}
2399 
2400 	/* Process Rx WBM release ring interrupt */
2401 	if (rx_wbm_rel_mask) {
2402 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2403 						  soc->rx_rel_ring.hal_srng,
2404 						  remaining_quota);
2405 
2406 		if (work_done) {
2407 			intr_stats->num_rx_wbm_rel_ring_masks++;
2408 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2409 					 work_done, budget);
2410 		}
2411 
2412 		budget -=  work_done;
2413 		if (budget <= 0) {
2414 			goto budget_done;
2415 		}
2416 		remaining_quota = budget;
2417 	}
2418 
2419 	/* Process Rx interrupts */
2420 	if (rx_mask) {
2421 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2422 			if (!(rx_mask & (1 << ring)))
2423 				continue;
2424 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2425 						  soc->reo_dest_ring[ring].hal_srng,
2426 						  ring,
2427 						  remaining_quota);
2428 			if (work_done) {
2429 				intr_stats->num_rx_ring_masks[ring]++;
2430 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2431 						 rx_mask, ring,
2432 						 work_done, budget);
2433 				budget -=  work_done;
2434 				if (budget <= 0)
2435 					goto budget_done;
2436 				remaining_quota = budget;
2437 			}
2438 		}
2439 	}
2440 
2441 	if (reo_status_mask) {
2442 		if (dp_reo_status_ring_handler(int_ctx, soc))
2443 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2444 	}
2445 
2446 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2447 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2448 		if (work_done) {
2449 			budget -=  work_done;
2450 			if (budget <= 0)
2451 				goto budget_done;
2452 			remaining_quota = budget;
2453 		}
2454 	}
2455 
2456 	qdf_lro_flush(int_ctx->lro_ctx);
2457 	intr_stats->num_masks++;
2458 
2459 budget_done:
2460 	return dp_budget - budget;
2461 }
2462 
2463 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2464 
2465 /*
2466  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2467  * @dp_ctx: DP SOC handle
2468  * @budget: Number of frames/descriptors that can be processed in one shot
2469  *
2470  * Return: remaining budget/quota for the soc device
2471  */
2472 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2473 {
2474 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2475 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2476 	struct dp_soc *soc = int_ctx->soc;
2477 	uint32_t remaining_quota = dp_budget;
2478 	uint32_t work_done  = 0;
2479 	int budget = dp_budget;
2480 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2481 
2482 	if (reo_status_mask) {
2483 		if (dp_reo_status_ring_handler(int_ctx, soc))
2484 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2485 	}
2486 
2487 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2488 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2489 		if (work_done) {
2490 			budget -=  work_done;
2491 			if (budget <= 0)
2492 				goto budget_done;
2493 			remaining_quota = budget;
2494 		}
2495 	}
2496 
2497 	qdf_lro_flush(int_ctx->lro_ctx);
2498 	intr_stats->num_masks++;
2499 
2500 budget_done:
2501 	return dp_budget - budget;
2502 }
2503 
2504 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2505 
/* dp_interrupt_timer()- timer poll for interrupts
 *
 * @arg: SoC Handle
 *
 * In full poll mode, services every interrupt context once and re-arms.
 * Otherwise, repeatedly polls the monitor ring of the LMAC the monitor
 * channel is on (dropping packets on the other LMACs) until the budget
 * or the time slice is exhausted, then re-arms the timer accordingly.
 *
 * Return: none
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	/* defaults past the valid context range; refined below when the
	 * monitor channel band (and hence LMAC) is known
	 */
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Resolve the monitor LMAC/interrupt context from the channel band */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	/* Poll until the work budget or the time slice is used up */
	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* More work pending: re-arm almost immediately; else at poll rate */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
2596 
2597 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2598 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2599 					struct dp_intr *intr_ctx)
2600 {
2601 	if (intr_ctx->rx_mon_ring_mask)
2602 		return true;
2603 
2604 	return false;
2605 }
2606 #else
2607 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2608 					struct dp_intr *intr_ctx)
2609 {
2610 	return false;
2611 }
2612 #endif
2613 
2614 /*
2615  * dp_soc_attach_poll() - Register handlers for DP interrupts
2616  * @txrx_soc: DP SOC handle
2617  *
2618  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2619  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2620  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2621  *
2622  * Return: 0 for success, nonzero for failure.
2623  */
2624 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2625 {
2626 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2627 	int i;
2628 	int lmac_id = 0;
2629 
2630 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2631 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2632 	soc->intr_mode = DP_INTR_POLL;
2633 
2634 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2635 		soc->intr_ctx[i].dp_intr_id = i;
2636 		soc->intr_ctx[i].tx_ring_mask =
2637 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2638 		soc->intr_ctx[i].rx_ring_mask =
2639 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2640 		soc->intr_ctx[i].rx_mon_ring_mask =
2641 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2642 		soc->intr_ctx[i].rx_err_ring_mask =
2643 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2644 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2645 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2646 		soc->intr_ctx[i].reo_status_ring_mask =
2647 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2648 		soc->intr_ctx[i].rxdma2host_ring_mask =
2649 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2650 		soc->intr_ctx[i].soc = soc;
2651 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2652 
2653 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2654 			hif_event_history_init(soc->hif_handle, i);
2655 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2656 			lmac_id++;
2657 		}
2658 	}
2659 
2660 	qdf_timer_init(soc->osdev, &soc->int_timer,
2661 			dp_interrupt_timer, (void *)soc,
2662 			QDF_TIMER_TYPE_WAKE_APPS);
2663 
2664 	return QDF_STATUS_SUCCESS;
2665 }
2666 
2667 /**
2668  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2669  * soc: DP soc handle
2670  *
2671  * Set the appropriate interrupt mode flag in the soc
2672  */
2673 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2674 {
2675 	uint32_t msi_base_data, msi_vector_start;
2676 	int msi_vector_count, ret;
2677 
2678 	soc->intr_mode = DP_INTR_INTEGRATED;
2679 
2680 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2681 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2682 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2683 		soc->intr_mode = DP_INTR_POLL;
2684 	} else {
2685 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2686 						  &msi_vector_count,
2687 						  &msi_base_data,
2688 						  &msi_vector_start);
2689 		if (ret)
2690 			return;
2691 
2692 		soc->intr_mode = DP_INTR_MSI;
2693 	}
2694 }
2695 
2696 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2697 #if defined(DP_INTR_POLL_BOTH)
2698 /*
2699  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2700  * @txrx_soc: DP SOC handle
2701  *
2702  * Call the appropriate attach function based on the mode of operation.
2703  * This is a WAR for enabling monitor mode.
2704  *
2705  * Return: 0 for success. nonzero for failure.
2706  */
2707 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2708 {
2709 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2710 
2711 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2712 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2713 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2714 	     QDF_GLOBAL_MONITOR_MODE)) {
2715 		dp_info("Poll mode");
2716 		return dp_soc_attach_poll(txrx_soc);
2717 	} else {
2718 		dp_info("Interrupt  mode");
2719 		return dp_soc_interrupt_attach(txrx_soc);
2720 	}
2721 }
2722 #else
2723 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2724 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2725 {
2726 	return dp_soc_attach_poll(txrx_soc);
2727 }
2728 #else
2729 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2730 {
2731 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2732 
2733 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2734 		return dp_soc_attach_poll(txrx_soc);
2735 	else
2736 		return dp_soc_interrupt_attach(txrx_soc);
2737 }
2738 #endif
2739 #endif
2740 
/*
 * dp_soc_interrupt_map_calculate_integrated() - Build the legacy
 *	(integrated/non-MSI) IRQ id map for one interrupt context
 * @soc: DP soc handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: out array of IRQ ids serviced by this context
 * @num_irq_r: out count of entries written to @irq_id_map
 *
 * For every ring bit set in the context's masks, appends the matching
 * hardware IRQ id (ring 0 uses the *_ring1/*_mac1 base id, higher ring
 * numbers subtract the bit index).
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Monitor ring contributes three IRQ ids per mask bit */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
2816 
/*
 * dp_soc_interrupt_map_calculate_msi() - Build the MSI IRQ id map for one
 *	interrupt context
 * @soc: DP soc handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: out array of IRQ ids serviced by this context
 * @num_irq_r: out count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector assigned to DP
 *
 * In MSI mode all rings of a context share a single vector; the context
 * number is folded onto the available vectors round-robin, and one IRQ id
 * is emitted only if the context services at least one ring.
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	/* Round-robin the context onto the MSI vectors assigned to DP */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* Emit one IRQ id only if this context services at least one ring */
	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
2865 
2866 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2867 				    int *irq_id_map, int *num_irq)
2868 {
2869 	int msi_vector_count, ret;
2870 	uint32_t msi_base_data, msi_vector_start;
2871 
2872 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2873 					    &msi_vector_count,
2874 					    &msi_base_data,
2875 					    &msi_vector_start);
2876 	if (ret)
2877 		return dp_soc_interrupt_map_calculate_integrated(soc,
2878 				intr_ctx_num, irq_id_map, num_irq);
2879 
2880 	else
2881 		dp_soc_interrupt_map_calculate_msi(soc,
2882 				intr_ctx_num, irq_id_map, num_irq,
2883 				msi_vector_count, msi_vector_start);
2884 }
2885 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near full irq
 * @soc: DP soc handle
 * @num_irq: IRQ number
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
/* Near-full IRQ feature compiled out: attach is a successful no-op */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif
2915 
2916 /*
2917  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2918  * @txrx_soc: DP SOC handle
2919  *
2920  * Return: none
2921  */
2922 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2923 {
2924 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2925 	int i;
2926 
2927 	if (soc->intr_mode == DP_INTR_POLL) {
2928 		qdf_timer_free(&soc->int_timer);
2929 	} else {
2930 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
2931 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2932 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
2933 	}
2934 
2935 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2936 		soc->intr_ctx[i].tx_ring_mask = 0;
2937 		soc->intr_ctx[i].rx_ring_mask = 0;
2938 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2939 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2940 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2941 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2942 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2943 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2944 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2945 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
2946 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
2947 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
2948 
2949 		hif_event_history_deinit(soc->hif_handle, i);
2950 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2951 	}
2952 
2953 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2954 		    sizeof(soc->mon_intr_id_lmac_map),
2955 		    DP_MON_INVALID_LMAC_ID);
2956 }
2957 
2958 /*
2959  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2960  * @txrx_soc: DP SOC handle
2961  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2965  *
2966  * Return: 0 for success. nonzero for failure.
2967  */
2968 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2969 {
2970 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2971 
2972 	int i = 0;
2973 	int num_irq = 0;
2974 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
2975 
2976 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2977 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2978 
2979 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2980 		int ret = 0;
2981 
2982 		/* Map of IRQ ids registered with one interrupt context */
2983 		int irq_id_map[HIF_MAX_GRP_IRQ];
2984 
2985 		int tx_mask =
2986 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2987 		int rx_mask =
2988 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2989 		int rx_mon_mask =
2990 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2991 		int rx_err_ring_mask =
2992 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2993 		int rx_wbm_rel_ring_mask =
2994 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2995 		int reo_status_ring_mask =
2996 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2997 		int rxdma2host_ring_mask =
2998 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2999 		int host2rxdma_ring_mask =
3000 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3001 		int host2rxdma_mon_ring_mask =
3002 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3003 				soc->wlan_cfg_ctx, i);
3004 		int rx_near_full_grp_1_mask =
3005 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3006 							     i);
3007 		int rx_near_full_grp_2_mask =
3008 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3009 							     i);
3010 		int tx_ring_near_full_mask =
3011 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3012 							    i);
3013 
3014 		soc->intr_ctx[i].dp_intr_id = i;
3015 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3016 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3017 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3018 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3019 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3020 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3021 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3022 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3023 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3024 			 host2rxdma_mon_ring_mask;
3025 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3026 						rx_near_full_grp_1_mask;
3027 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3028 						rx_near_full_grp_2_mask;
3029 		soc->intr_ctx[i].tx_ring_near_full_mask =
3030 						tx_ring_near_full_mask;
3031 
3032 		soc->intr_ctx[i].soc = soc;
3033 
3034 		num_irq = 0;
3035 
3036 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3037 					       &num_irq);
3038 
3039 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3040 		    tx_ring_near_full_mask) {
3041 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3042 							  irq_id_map, i);
3043 		} else {
3044 			ret = hif_register_ext_group(soc->hif_handle,
3045 				num_irq, irq_id_map, dp_service_srngs,
3046 				&soc->intr_ctx[i], "dp_intr",
3047 				HIF_EXEC_NAPI_TYPE,
3048 				QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3049 		}
3050 
3051 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3052 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3053 
3054 		if (ret) {
3055 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3056 			dp_soc_interrupt_detach(txrx_soc);
3057 			return QDF_STATUS_E_FAILURE;
3058 		}
3059 
3060 		hif_event_history_init(soc->hif_handle, i);
3061 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3062 
3063 		if (rx_err_ring_mask)
3064 			rx_err_ring_intr_ctxt_id = i;
3065 	}
3066 
3067 	hif_configure_ext_group_interrupts(soc->hif_handle);
3068 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3069 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3070 						  rx_err_ring_intr_ctxt_id, 0);
3071 
3072 	return QDF_STATUS_SUCCESS;
3073 }
3074 
3075 #define AVG_MAX_MPDUS_PER_TID 128
3076 #define AVG_TIDS_PER_CLIENT 2
3077 #define AVG_FLOWS_PER_TID 2
3078 #define AVG_MSDUS_PER_FLOW 128
3079 #define AVG_MSDUS_PER_MPDU 4
3080 
3081 /*
3082  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3083  * @soc: DP SOC handle
3084  * @mac_id: mac id
3085  *
3086  * Return: none
3087  */
3088 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3089 {
3090 	struct qdf_mem_multi_page_t *pages;
3091 
3092 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3093 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3094 	} else {
3095 		pages = &soc->link_desc_pages;
3096 	}
3097 
3098 	if (!pages) {
3099 		dp_err("can not get link desc pages");
3100 		QDF_ASSERT(0);
3101 		return;
3102 	}
3103 
3104 	if (pages->dma_pages) {
3105 		wlan_minidump_remove((void *)
3106 				     pages->dma_pages->page_v_addr_start,
3107 				     pages->num_pages * pages->page_size,
3108 				     soc->ctrl_psoc,
3109 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3110 				     "hw_link_desc_bank");
3111 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3112 					     pages, 0, false);
3113 	}
3114 }
3115 
3116 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3117 
3118 /*
3119  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3120  * @soc: DP SOC handle
3121  * @mac_id: mac id
3122  *
3123  * Allocates memory pages for link descriptors, the page size is 4K for
 * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages are
 * allocated for regular RX/TX, and if there is a valid mac_id, link
 * descriptors are allocated for RX monitor mode.
3127  *
3128  * Return: QDF_STATUS_SUCCESS: Success
3129  *	   QDF_STATUS_E_FAILURE: Failure
3130  */
3131 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3132 {
3133 	hal_soc_handle_t hal_soc = soc->hal_soc;
3134 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3135 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3136 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3137 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3138 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3139 	uint32_t num_mpdu_links_per_queue_desc =
3140 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3141 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3142 	uint32_t *total_link_descs, total_mem_size;
3143 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3144 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3145 	uint32_t num_entries;
3146 	struct qdf_mem_multi_page_t *pages;
3147 	struct dp_srng *dp_srng;
3148 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3149 
3150 	/* Only Tx queue descriptors are allocated from common link descriptor
3151 	 * pool Rx queue descriptors are not included in this because (REO queue
3152 	 * extension descriptors) they are expected to be allocated contiguously
3153 	 * with REO queue descriptors
3154 	 */
3155 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3156 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3157 		/* dp_monitor_get_link_desc_pages returns NULL only
3158 		 * if monitor SOC is  NULL
3159 		 */
3160 		if (!pages) {
3161 			dp_err("can not get link desc pages");
3162 			QDF_ASSERT(0);
3163 			return QDF_STATUS_E_FAULT;
3164 		}
3165 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3166 		num_entries = dp_srng->alloc_size /
3167 			hal_srng_get_entrysize(soc->hal_soc,
3168 					       RXDMA_MONITOR_DESC);
3169 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3170 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3171 			      MINIDUMP_STR_SIZE);
3172 	} else {
3173 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3174 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3175 
3176 		num_mpdu_queue_descs = num_mpdu_link_descs /
3177 			num_mpdu_links_per_queue_desc;
3178 
3179 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3180 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3181 			num_msdus_per_link_desc;
3182 
3183 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3184 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3185 
3186 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3187 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3188 
3189 		pages = &soc->link_desc_pages;
3190 		total_link_descs = &soc->total_link_descs;
3191 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3192 			      MINIDUMP_STR_SIZE);
3193 	}
3194 
3195 	/* If link descriptor banks are allocated, return from here */
3196 	if (pages->num_pages)
3197 		return QDF_STATUS_SUCCESS;
3198 
3199 	/* Round up to power of 2 */
3200 	*total_link_descs = 1;
3201 	while (*total_link_descs < num_entries)
3202 		*total_link_descs <<= 1;
3203 
3204 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3205 		     soc, *total_link_descs, link_desc_size);
3206 	total_mem_size =  *total_link_descs * link_desc_size;
3207 	total_mem_size += link_desc_align;
3208 
3209 	dp_init_info("%pK: total_mem_size: %d",
3210 		     soc, total_mem_size);
3211 
3212 	dp_set_max_page_size(pages, max_alloc_size);
3213 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3214 				      pages,
3215 				      link_desc_size,
3216 				      *total_link_descs,
3217 				      0, false);
3218 	if (!pages->num_pages) {
3219 		dp_err("Multi page alloc fail for hw link desc pool");
3220 		return QDF_STATUS_E_FAULT;
3221 	}
3222 
3223 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3224 			  pages->num_pages * pages->page_size,
3225 			  soc->ctrl_psoc,
3226 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3227 			  "hw_link_desc_bank");
3228 
3229 	return QDF_STATUS_SUCCESS;
3230 }
3231 
3232 /*
3233  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3234  * @soc: DP SOC handle
3235  *
3236  * Return: none
3237  */
static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
{
	uint32_t i;
	uint32_t size = soc->wbm_idle_scatter_buf_size;
	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
	qdf_dma_addr_t paddr;

	/* Scatter-buffer mode was used when the idle list did not fit in a
	 * single SRNG allocation; free each consistent buffer.
	 */
	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
			if (vaddr) {
				qdf_mem_free_consistent(soc->osdev,
							soc->osdev->dev,
							size,
							vaddr,
							paddr,
							0);
				/* NOTE(review): dead store — only the local
				 * copy is cleared; the soc array entry keeps
				 * the stale pointer. Confirm callers never
				 * re-enter free.
				 */
				vaddr = NULL;
			}
		}
	} else {
		/* SRNG mode: unregister from minidump and free the ring */
		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				     soc->wbm_idle_link_ring.alloc_size,
				     soc->ctrl_psoc,
				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				     "wbm_idle_link_ring");
		dp_srng_free(soc, &soc->wbm_idle_link_ring);
	}
}
3268 
3269 /*
3270  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3271  * @soc: DP SOC handle
3272  *
3273  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3274  * link descriptors is less then the max_allocated size. else
3275  * allocate memory for wbm_idle_scatter_buffer.
3276  *
3277  * Return: QDF_STATUS_SUCCESS: success
3278  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3279  */
static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;

	if (total_mem_size <= max_alloc_size) {
		/* Small enough for a single SRNG allocation */
		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
			dp_init_err("%pK: Link desc idle ring setup failed",
				    soc);
			goto fail;
		}

		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				  soc->wbm_idle_link_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				  "wbm_idle_link_ring");
	} else {
		/* Too large for one allocation: split the idle list across
		 * HAL-sized scatter buffers allocated as consistent memory.
		 */
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
					soc->hal_soc, total_mem_size,
					soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			/* qdf_mem_alloc_consistent fills the paddr slot */
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			buf_size = soc->wbm_idle_scatter_buf_size;
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 buf_size,
							 baseaddr);

			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}
		soc->num_scatter_bufs = num_scatter_bufs;
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Free any scatter buffers allocated before the failure; the full
	 * array is scanned since entries past the failure point are NULL.
	 */
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

		if (vaddr) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						soc->wbm_idle_scatter_buf_size,
						vaddr,
						paddr, 0);
			vaddr = NULL;
		}
	}
	return QDF_STATUS_E_NOMEM;
}
3363 
3364 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3365 
3366 /*
3367  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3368  * @soc: DP SOC handle
3369  *
3370  * Return: QDF_STATUS_SUCCESS: success
3371  *         QDF_STATUS_E_FAILURE: failure
3372  */
3373 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3374 {
3375 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3376 
3377 	if (dp_srng->base_vaddr_unaligned) {
3378 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3379 			return QDF_STATUS_E_FAILURE;
3380 	}
3381 	return QDF_STATUS_SUCCESS;
3382 }
3383 
3384 /*
3385  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3386  * @soc: DP SOC handle
3387  *
3388  * Return: None
3389  */
/* Deinitialize (but do not free) the WBM idle link SRNG */
static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
3394 
3395 /*
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3397  * @soc: DP SOC handle
3398  * @mac_id: mac id
3399  *
3400  * Return: None
3401  */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	/* Select the descriptor pool and destination ring: regular RX/TX
	 * uses the soc-level pool and WBM idle link ring, monitor mode uses
	 * the per-mac pool and RXDMA monitor descriptor ring.
	 */
	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	/* Zero every descriptor page before handing addresses to HW */
	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		/* SRNG mode: write one link descriptor address + cookie per
		 * ring entry.
		 */
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		/* NOTE(review): pages is rebased to the soc-level pool here
		 * even on the monitor path; dma_pages was captured from the
		 * original pool above — confirm this is intentional.
		 */
		pages = &soc->link_desc_pages;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(count, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size));
			count++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		pages = &soc->link_desc_pages;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			cookie = LINK_DESC_COOKIE(count, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size));
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* Current buffer exhausted; move to the next
				 * scatter buffer.
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
}
3522 
3523 qdf_export_symbol(dp_link_desc_ring_replenish);
3524 
3525 #ifdef IPA_OFFLOAD
3526 #define USE_1_IPA_RX_REO_RING 1
3527 #define USE_2_IPA_RX_REO_RINGS 2
3528 #define REO_DST_RING_SIZE_QCA6290 1023
3529 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3530 #define REO_DST_RING_SIZE_QCA8074 1023
3531 #define REO_DST_RING_SIZE_QCN9000 2048
3532 #else
3533 #define REO_DST_RING_SIZE_QCA8074 8
3534 #define REO_DST_RING_SIZE_QCN9000 8
3535 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3536 
3537 #ifdef IPA_WDI3_TX_TWO_PIPES
3538 #ifdef DP_MEMORY_OPT
/* Init the IPA alternate TX/TX-comp ring pair (DP_MEMORY_OPT builds) */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Deinit the IPA alternate TX/TX-comp ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Allocate memory for the IPA alternate TX/TX-comp ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Free memory of the IPA alternate TX/TX-comp ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3558 
3559 #else /* !DP_MEMORY_OPT */
/* Stubs for builds without DP_MEMORY_OPT: the alternate IPA TX ring pair
 * is not managed here, so each operation is a no-op success.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	/* Fix: the original `return 0` was missing its semicolon, which
	 * breaks compilation for this configuration.
	 */
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3577 #endif /* DP_MEMORY_OPT */
3578 
/* HAL-init the TCL data ring used as the IPA alternate TX pipe */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
3584 
3585 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Stubs for builds without IPA_WDI3_TX_TWO_PIPES: there is no alternate
 * TX pipe, so all operations are no-op successes.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3607 
3608 #endif /* IPA_WDI3_TX_TWO_PIPES */
3609 
3610 #else
3611 
3612 #define REO_DST_RING_SIZE_QCA6290 1024
3613 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3614 #define REO_DST_RING_SIZE_QCA8074 2048
3615 #define REO_DST_RING_SIZE_QCN9000 2048
3616 #else
3617 #define REO_DST_RING_SIZE_QCA8074 8
3618 #define REO_DST_RING_SIZE_QCN9000 8
3619 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3620 
/* Stubs for builds without IPA_OFFLOAD: no IPA rings exist, so all
 * operations are no-op successes.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3642 
3643 #endif /* IPA_OFFLOAD */
3644 
3645 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
3647  * @soc: Datapath soc handler
3648  *
3649  * This api resets the default cpu ring map
3650  */
3651 
3652 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
3653 {
3654 	uint8_t i;
3655 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3656 
3657 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3658 		switch (nss_config) {
3659 		case dp_nss_cfg_first_radio:
3660 			/*
3661 			 * Setting Tx ring map for one nss offloaded radio
3662 			 */
3663 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
3664 			break;
3665 
3666 		case dp_nss_cfg_second_radio:
3667 			/*
3668 			 * Setting Tx ring for two nss offloaded radios
3669 			 */
3670 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
3671 			break;
3672 
3673 		case dp_nss_cfg_dbdc:
3674 			/*
3675 			 * Setting Tx ring map for 2 nss offloaded radios
3676 			 */
3677 			soc->tx_ring_map[i] =
3678 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
3679 			break;
3680 
3681 		case dp_nss_cfg_dbtc:
3682 			/*
3683 			 * Setting Tx ring map for 3 nss offloaded radios
3684 			 */
3685 			soc->tx_ring_map[i] =
3686 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
3687 			break;
3688 
3689 		default:
3690 			dp_err("tx_ring_map failed due to invalid nss cfg");
3691 			break;
3692 		}
3693 	}
3694 }
3695 
3696 /*
3697  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
3698  * @dp_soc - DP soc handle
3699  * @ring_type - ring type
3700  * @ring_num - ring_num
3701  *
3702  * return 0 or 1
3703  */
3704 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
3705 {
3706 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3707 	uint8_t status = 0;
3708 
3709 	switch (ring_type) {
3710 	case WBM2SW_RELEASE:
3711 	case REO_DST:
3712 	case RXDMA_BUF:
3713 	case REO_EXCEPTION:
3714 		status = ((nss_config) & (1 << ring_num));
3715 		break;
3716 	default:
3717 		break;
3718 	}
3719 
3720 	return status;
3721 }
3722 
3723 /*
3724  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
3725  *					  unused WMAC hw rings
3726  * @dp_soc - DP Soc handle
3727  * @mac_num - wmac num
3728  *
3729  * Return: Return void
3730  */
static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
						int mac_num)
{
	uint8_t *grp_mask = NULL;
	int group_number;

	/* For each WMAC-related interrupt source, find the group that owns
	 * this mac's ring and zero its mask so the unused WMAC raises no
	 * interrupts.
	 * NOTE(review): group_number is not checked for a -not-found- result
	 * before being passed to the setters — presumably the setters
	 * tolerate an invalid group; confirm.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
					  group_number, 0x0);

	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
				      group_number, 0x0);

	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
					  group_number, 0x0);

	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0x0);
}
3757 
3758 /*
3759  * dp_soc_reset_intr_mask() - reset interrupt mask
3760  * @dp_soc - DP Soc handle
3761  *
3762  * Return: Return void
3763  */
static void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* NOTE(review): this skips only when the ring is both
		 * not-offloaded and its mask is already clear; verify intent
		 * for a not-offloaded ring with a set mask.
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		/* refill masks are indexed by lmac, not pdev */
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
			group_number, mask);
	}

	/* REO exception ring: clear the mask for NSS-offloaded rings.
	 * NOTE(review): num_ring still holds num_reo_dest_rings here —
	 * presumably exception rings track the REO dest ring count; confirm.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
3896 
3897 #ifdef IPA_OFFLOAD
3898 /**
3899  * dp_reo_remap_config() - configure reo remap register value based
3900  *                         nss configuration.
3901  *		based on offload_radio value below remap configuration
3902  *		get applied.
3903  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3904  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3905  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
3906  *		3 - both Radios handled by NSS (remap not required)
3907  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
3908  *
3909  * @remap1: output parameter indicates reo remap 1 register value
3910  * @remap2: output parameter indicates reo remap 2 register value
3911  * Return: bool type, true if remap is configured else false.
3912  */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
{
	/* Candidate SW destination rings; only the first entries are
	 * populated, the rest stay zero-initialized.
	 */
	uint32_t ring[8] = {REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3};
	int target_type;

	target_type = hal_get_target_type(soc->hal_soc);

	switch (target_type) {
	case TARGET_TYPE_WCN7850:
		/* WCN7850 reserves two REO dest rings for IPA RX */
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_2_IPA_RX_REO_RINGS, remap1,
					      remap2);
		break;

	default:
		/* All other targets reserve one REO dest ring for IPA RX */
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_1_IPA_RX_REO_RING, remap1,
					      remap2);
		break;
	}

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	/* Remap is always configured in IPA offload builds */
	return true;
}
3940 
#ifdef IPA_WDI3_TX_TWO_PIPES
/* Return true if @index is the TCL ring reserved for the IPA alternate
 * Tx pipe.
 */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* Return true if @index is the Tx completion ring paired with the IPA
 * alternate Tx pipe.
 */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single IPA Tx pipe build: there is no alternate Tx ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

/* Single IPA Tx pipe build: there is no alternate Tx completion ring */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */
3962 
3963 /**
3964  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3965  *
3966  * @tx_ring_num: Tx ring number
3967  * @tx_ipa_ring_sz: Return param only updated for IPA.
3968  * @soc_cfg_ctx: dp soc cfg context
3969  *
3970  * Return: None
3971  */
3972 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
3973 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3974 {
3975 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
3976 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
3977 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
3978 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
3979 }
3980 
3981 /**
3982  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3983  *
3984  * @tx_comp_ring_num: Tx comp ring number
3985  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3986  * @soc_cfg_ctx: dp soc cfg context
3987  *
3988  * Return: None
3989  */
3990 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3991 					 int *tx_comp_ipa_ring_sz,
3992 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3993 {
3994 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
3995 		*tx_comp_ipa_ring_sz =
3996 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
3997 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
3998 		*tx_comp_ipa_ring_sz =
3999 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4000 }
4001 #else
4002 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4003 {
4004 	uint8_t num = 0;
4005 
4006 	switch (value) {
4007 	case 0xF:
4008 		num = 4;
4009 		ring[0] = REO_REMAP_SW1;
4010 		ring[1] = REO_REMAP_SW2;
4011 		ring[2] = REO_REMAP_SW3;
4012 		ring[3] = REO_REMAP_SW4;
4013 		break;
4014 	case 0xE:
4015 		num = 3;
4016 		ring[0] = REO_REMAP_SW2;
4017 		ring[1] = REO_REMAP_SW3;
4018 		ring[2] = REO_REMAP_SW4;
4019 		break;
4020 	case 0xD:
4021 		num = 3;
4022 		ring[0] = REO_REMAP_SW1;
4023 		ring[1] = REO_REMAP_SW3;
4024 		ring[2] = REO_REMAP_SW4;
4025 		break;
4026 	case 0xC:
4027 		num = 2;
4028 		ring[0] = REO_REMAP_SW3;
4029 		ring[1] = REO_REMAP_SW4;
4030 		break;
4031 	case 0xB:
4032 		num = 3;
4033 		ring[0] = REO_REMAP_SW1;
4034 		ring[1] = REO_REMAP_SW2;
4035 		ring[2] = REO_REMAP_SW4;
4036 		break;
4037 	case 0xA:
4038 		num = 2;
4039 		ring[0] = REO_REMAP_SW2;
4040 		ring[1] = REO_REMAP_SW4;
4041 		break;
4042 	case 0x9:
4043 		num = 2;
4044 		ring[0] = REO_REMAP_SW1;
4045 		ring[1] = REO_REMAP_SW4;
4046 		break;
4047 	case 0x8:
4048 		num = 1;
4049 		ring[0] = REO_REMAP_SW4;
4050 		break;
4051 	case 0x7:
4052 		num = 3;
4053 		ring[0] = REO_REMAP_SW1;
4054 		ring[1] = REO_REMAP_SW2;
4055 		ring[2] = REO_REMAP_SW3;
4056 		break;
4057 	case 0x6:
4058 		num = 2;
4059 		ring[0] = REO_REMAP_SW2;
4060 		ring[1] = REO_REMAP_SW3;
4061 		break;
4062 	case 0x5:
4063 		num = 2;
4064 		ring[0] = REO_REMAP_SW1;
4065 		ring[1] = REO_REMAP_SW3;
4066 		break;
4067 	case 0x4:
4068 		num = 1;
4069 		ring[0] = REO_REMAP_SW3;
4070 		break;
4071 	case 0x3:
4072 		num = 2;
4073 		ring[0] = REO_REMAP_SW1;
4074 		ring[1] = REO_REMAP_SW2;
4075 		break;
4076 	case 0x2:
4077 		num = 1;
4078 		ring[0] = REO_REMAP_SW2;
4079 		break;
4080 	case 0x1:
4081 		num = 1;
4082 		ring[0] = REO_REMAP_SW1;
4083 		break;
4084 	}
4085 	return num;
4086 }
4087 
4088 static bool dp_reo_remap_config(struct dp_soc *soc,
4089 				uint32_t *remap1,
4090 				uint32_t *remap2)
4091 {
4092 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4093 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4094 	uint8_t target_type, num;
4095 	uint32_t ring[4];
4096 	uint32_t value;
4097 
4098 	target_type = hal_get_target_type(soc->hal_soc);
4099 
4100 	switch (offload_radio) {
4101 	case dp_nss_cfg_default:
4102 		value = reo_config & 0xF;
4103 		num = dp_reo_ring_selection(value, ring);
4104 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4105 					      num, remap1, remap2);
4106 
4107 		break;
4108 	case dp_nss_cfg_first_radio:
4109 		value = reo_config & 0xE;
4110 		num = dp_reo_ring_selection(value, ring);
4111 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4112 					      num, remap1, remap2);
4113 
4114 		break;
4115 	case dp_nss_cfg_second_radio:
4116 		value = reo_config & 0xD;
4117 		num = dp_reo_ring_selection(value, ring);
4118 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4119 					      num, remap1, remap2);
4120 
4121 		break;
4122 	case dp_nss_cfg_dbdc:
4123 	case dp_nss_cfg_dbtc:
4124 		/* return false if both or all are offloaded to NSS */
4125 		return false;
4126 
4127 	}
4128 
4129 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4130 		 *remap1, *remap2, offload_radio);
4131 	return true;
4132 }
4133 
/* IPA offload disabled: Tx ring size is left at its configured value */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

/* IPA offload disabled: Tx comp ring size is left at its configured value */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4144 #endif /* IPA_OFFLOAD */
4145 
4146 /*
4147  * dp_reo_frag_dst_set() - configure reo register to set the
4148  *                        fragment destination ring
4149  * @soc : Datapath soc
4150  * @frag_dst_ring : output parameter to set fragment destination ring
4151  *
4152  * Based on offload_radio below fragment destination rings is selected
4153  * 0 - TCL
4154  * 1 - SW1
4155  * 2 - SW2
4156  * 3 - SW3
4157  * 4 - SW4
4158  * 5 - Release
4159  * 6 - FW
4160  * 7 - alternate select
4161  *
4162  * return: void
4163  */
4164 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4165 {
4166 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4167 
4168 	switch (offload_radio) {
4169 	case dp_nss_cfg_default:
4170 		*frag_dst_ring = REO_REMAP_TCL;
4171 		break;
4172 	case dp_nss_cfg_first_radio:
4173 		/*
4174 		 * This configuration is valid for single band radio which
4175 		 * is also NSS offload.
4176 		 */
4177 	case dp_nss_cfg_dbdc:
4178 	case dp_nss_cfg_dbtc:
4179 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4180 		break;
4181 	default:
4182 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4183 		break;
4184 	}
4185 }
4186 
#ifdef ENABLE_VERBOSE_DEBUG
/* Enable DP/HAL verbose logging according to the per-packet trace mask */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	/* DP verbose flag is only ever latched on here, never cleared */
	if (cfg->per_pkt_trace & dp_verbose_debug_mask)
		is_dp_verbose_debug_enabled = true;

	/* HAL verbose logging tracks the configured mask exactly */
	hal_set_verbose_debug((cfg->per_pkt_trace & hal_verbose_debug_mask) != 0);
}
#else
/* Verbose debug compiled out: no-op */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
#endif
4207 
#ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to wait for HW Rx stats completion */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
#else
/* Ext stats feature compiled out: no-op */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
#endif
4218 
/**
 * dp_deinit_tx_pair_by_index() - Deinit the TCL data / WBM completion
 *				  ring pair for the given index
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Removes the minidump entries and deinitializes both rings. Bails out
 * if the cfg layer has no valid ring numbers for @index.
 *
 * Return: None
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
		dp_err("incorrect tcl/wbm ring num for index %u", index);
		return;
	}

	/* Unregister from minidump before the backing memory goes away */
	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
4250 
4251 /**
4252  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4253  * ring pair
4254  * @soc: DP soc pointer
4255  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4256  *
4257  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4258  */
4259 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4260 						uint8_t index)
4261 {
4262 	int tcl_ring_num, wbm_ring_num;
4263 
4264 	if (index >= MAX_TCL_DATA_RINGS) {
4265 		dp_err("unexpected index!");
4266 		QDF_BUG(0);
4267 		goto fail1;
4268 	}
4269 
4270 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4271 						index,
4272 						&tcl_ring_num,
4273 						&wbm_ring_num);
4274 
4275 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4276 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4277 		goto fail1;
4278 	}
4279 
4280 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4281 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4282 			 tcl_ring_num, 0)) {
4283 		dp_err("dp_srng_init failed for tcl_data_ring");
4284 		goto fail1;
4285 	}
4286 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4287 			  soc->tcl_data_ring[index].alloc_size,
4288 			  soc->ctrl_psoc,
4289 			  WLAN_MD_DP_SRNG_TCL_DATA,
4290 			  "tcl_data_ring");
4291 
4292 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4293 			 wbm_ring_num, 0)) {
4294 		dp_err("dp_srng_init failed for tx_comp_ring");
4295 		goto fail1;
4296 	}
4297 
4298 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4299 			  soc->tx_comp_ring[index].alloc_size,
4300 			  soc->ctrl_psoc,
4301 			  WLAN_MD_DP_SRNG_TX_COMP,
4302 			  "tcl_comp_ring");
4303 
4304 	return QDF_STATUS_SUCCESS;
4305 
4306 fail1:
4307 	return QDF_STATUS_E_FAILURE;
4308 }
4309 
/* Free the TCL data / Tx completion ring memory for the given index */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
4316 
4317 /**
4318  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4319  * ring pair for the given "index"
4320  * @soc: DP soc pointer
4321  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4322  *
4323  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4324  */
4325 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4326 						 uint8_t index)
4327 {
4328 	int tx_ring_size;
4329 	int tx_comp_ring_size;
4330 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4331 	int cached = 0;
4332 
4333 	if (index >= MAX_TCL_DATA_RINGS) {
4334 		dp_err("unexpected index!");
4335 		QDF_BUG(0);
4336 		goto fail1;
4337 	}
4338 
4339 	dp_debug("index %u", index);
4340 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4341 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4342 
4343 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4344 			  tx_ring_size, cached)) {
4345 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4346 		goto fail1;
4347 	}
4348 
4349 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4350 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4351 	/* Enable cached TCL desc if NSS offload is disabled */
4352 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4353 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4354 
4355 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4356 			  tx_comp_ring_size, cached)) {
4357 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4358 		goto fail1;
4359 	}
4360 
4361 	return QDF_STATUS_SUCCESS;
4362 
4363 fail1:
4364 	return QDF_STATUS_E_FAILURE;
4365 }
4366 
/**
 * dp_lro_hash_setup() - Configure the LRO/GRO toeplitz hash in FW
 * @soc: DP soc handle
 * @pdev: DP pdev handle, identifies which pdev the config is sent for
 *
 * Builds a cdp_lro_hash_config with random toeplitz seeds and, when LRO
 * or GRO is enabled, the TCP flag/mask filter, then sends it to FW via
 * the ol_ops->lro_hash_config callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	/* Nothing to configure if no hash-consuming feature is enabled */
	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	/* Random per-boot toeplitz seeds for IPv4 and IPv6 hashing */
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	/* NOTE(review): assert plus explicit NULL check below is redundant;
	 * the explicit check is what protects release builds.
	 */
	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
4431 
4432 /*
4433  * dp_rxdma_ring_setup() - configure the RX DMA rings
4434  * @soc: data path SoC handle
4435  * @pdev: Physical device handle
4436  *
4437  * Return: 0 - success, > 0 - failure
4438  */
4439 #ifdef QCA_HOST2FW_RXBUF_RING
4440 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4441 {
4442 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4443 	int max_mac_rings;
4444 	int i;
4445 	int ring_size;
4446 
4447 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4448 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4449 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4450 
4451 	for (i = 0; i < max_mac_rings; i++) {
4452 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4453 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4454 				  RXDMA_BUF, ring_size, 0)) {
4455 			dp_init_err("%pK: failed rx mac ring setup", soc);
4456 			return QDF_STATUS_E_FAILURE;
4457 		}
4458 
4459 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4460 				 RXDMA_BUF, 1, i)) {
4461 			dp_init_err("%pK: failed rx mac ring setup", soc);
4462 
4463 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4464 			return QDF_STATUS_E_FAILURE;
4465 		}
4466 	}
4467 	return QDF_STATUS_SUCCESS;
4468 }
4469 #else
4470 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4471 {
4472 	return QDF_STATUS_SUCCESS;
4473 }
4474 #endif
4475 
4476 /**
4477  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4478  * @pdev - DP_PDEV handle
4479  *
4480  * Return: void
4481  */
4482 static inline void
4483 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4484 {
4485 	uint8_t map_id;
4486 	struct dp_soc *soc = pdev->soc;
4487 
4488 	if (!soc)
4489 		return;
4490 
4491 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4492 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4493 			     default_dscp_tid_map,
4494 			     sizeof(default_dscp_tid_map));
4495 	}
4496 
4497 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4498 		hal_tx_set_dscp_tid_map(soc->hal_soc,
4499 					default_dscp_tid_map,
4500 					map_id);
4501 	}
4502 }
4503 
4504 /**
4505  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
4506  * @pdev - DP_PDEV handle
4507  *
4508  * Return: void
4509  */
4510 static inline void
4511 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
4512 {
4513 	struct dp_soc *soc = pdev->soc;
4514 
4515 	if (!soc)
4516 		return;
4517 
4518 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
4519 		     sizeof(default_pcp_tid_map));
4520 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
4521 }
4522 
#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_FAILURE: Error return
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int entries;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);

	/* Setup second Rx refill buffer ring */
	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed second rx refill ring", soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
		dp_init_err("%pK: dp_srng_init failed second rx refill ring", soc);
		/*
		 * NOTE(review): the ring allocated above is not freed on
		 * this path — presumably the caller's failure path invokes
		 * dp_cleanup_ipa_rx_refill_buf_ring; verify.
		 */
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
4556 
4557 /**
4558  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
4559  * @soc: data path instance
4560  * @pdev: core txrx pdev context
4561  *
4562  * Return: void
4563  */
4564 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4565 					      struct dp_pdev *pdev)
4566 {
4567 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
4568 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
4569 }
4570 
#else
/* IPA offload disabled: no second Rx refill ring is needed */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* IPA offload disabled: nothing to clean up */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
#endif
4583 
#ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
 *
 * @soc: DP soc handle
 *
 * Allocation failure is tolerated: soc->tx_hw_desc_history simply
 * stays NULL and recording is skipped.
 *
 * Return: None
 */
static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
	soc->tx_hw_desc_history =
		dp_context_alloc_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
				     sizeof(*soc->tx_hw_desc_history));
	if (!soc->tx_hw_desc_history)
		return;

	soc->tx_hw_desc_history->index = 0;
}

/* Free the TX HW descriptor history buffer (NULL-safe) */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
			    soc->tx_hw_desc_history);
}

#else /* DP_TX_HW_DESC_HISTORY */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}

static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
#endif /* DP_TX_HW_DESC_HISTORY */
4618 
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
#ifndef RX_DEFRAG_DO_NOT_REINJECT
/**
 * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
 *					    history.
 * @soc: DP soc handle
 *
 * Allocation failure is tolerated: the history pointer stays NULL and
 * recording is skipped.
 *
 * Return: None
 */
static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
	soc->rx_reinject_ring_history =
		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
				     sizeof(struct dp_rx_reinject_history));
	if (soc->rx_reinject_ring_history)
		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
}
#else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Reinjection disabled at build time: no history to attach */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
#endif /* RX_DEFRAG_DO_NOT_REINJECT */
4642 
4643 /**
4644  * dp_soc_rx_history_attach() - Attach the ring history record buffers
4645  * @soc: DP soc structure
4646  *
4647  * This function allocates the memory for recording the rx ring, rx error
4648  * ring and the reinject ring entries. There is no error returned in case
4649  * of allocation failure since the record function checks if the history is
4650  * initialized or not. We do not want to fail the driver load in case of
4651  * failure to allocate memory for debug history.
4652  *
4653  * Returns: None
4654  */
4655 static void dp_soc_rx_history_attach(struct dp_soc *soc)
4656 {
4657 	int i;
4658 	uint32_t rx_ring_hist_size;
4659 	uint32_t rx_refill_ring_hist_size;
4660 
4661 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
4662 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
4663 
4664 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
4665 		soc->rx_ring_history[i] = dp_context_alloc_mem(
4666 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
4667 		if (soc->rx_ring_history[i])
4668 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
4669 	}
4670 
4671 	soc->rx_err_ring_history = dp_context_alloc_mem(
4672 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
4673 	if (soc->rx_err_ring_history)
4674 		qdf_atomic_init(&soc->rx_err_ring_history->index);
4675 
4676 	dp_soc_rx_reinject_ring_history_attach(soc);
4677 
4678 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4679 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
4680 						soc,
4681 						DP_RX_REFILL_RING_HIST_TYPE,
4682 						rx_refill_ring_hist_size);
4683 
4684 		if (soc->rx_refill_ring_history[i])
4685 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
4686 	}
4687 }
4688 
/* Free all Rx ring history buffers attached by dp_soc_rx_history_attach() */
static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
				    soc->rx_ring_history[i]);

	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
			    soc->rx_err_ring_history);

	/*
	 * No need for a featurized detach since qdf_mem_free takes
	 * care of NULL pointer.
	 */
	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
			    soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_PDEV_CNT; i++)
		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
				    soc->rx_refill_ring_history[i]);
}
4711 
#else
/* Rx ring history compiled out: no-op */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

/* Rx ring history compiled out: no-op */
static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif
4721 
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/**
 * dp_soc_tx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the tx tcl ring and
 * the tx comp ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Returns: None
 */
static void dp_soc_tx_history_attach(struct dp_soc *soc)
{
	soc->tx_tcl_history =
		dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
				     sizeof(*soc->tx_tcl_history));
	if (soc->tx_tcl_history)
		qdf_atomic_init(&soc->tx_tcl_history->index);

	soc->tx_comp_history =
		dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
				     sizeof(*soc->tx_comp_history));
	if (soc->tx_comp_history)
		qdf_atomic_init(&soc->tx_comp_history->index);
}

/**
 * dp_soc_tx_history_detach() - Detach the ring history record buffers
 * @soc: DP soc structure
 *
 * Frees the tx tcl ring and tx comp ring history buffers (NULL-safe).
 *
 * Returns: None
 */
static void dp_soc_tx_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
}

#else
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
4777 
4778 /*
4779 * dp_pdev_attach_wifi3() - attach txrx pdev
4780 * @txrx_soc: Datapath SOC handle
4781 * @htc_handle: HTC handle for host-target interface
4782 * @qdf_osdev: QDF OS device
4783 * @pdev_id: PDEV ID
4784 *
4785 * Return: QDF_STATUS
4786 */
4787 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
4788 					      HTC_HANDLE htc_handle,
4789 					      qdf_device_t qdf_osdev,
4790 					      uint8_t pdev_id)
4791 {
4792 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4793 	struct dp_pdev *pdev = NULL;
4794 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4795 	int nss_cfg;
4796 
4797 	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
4798 	if (!pdev) {
4799 		dp_init_err("%pK: DP PDEV memory allocation failed",
4800 			    soc);
4801 		goto fail0;
4802 	}
4803 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
4804 			  WLAN_MD_DP_PDEV, "dp_pdev");
4805 
4806 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4807 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
4808 
4809 	if (!pdev->wlan_cfg_ctx) {
4810 		dp_init_err("%pK: pdev cfg_attach failed", soc);
4811 		goto fail1;
4812 	}
4813 
4814 	/*
4815 	 * set nss pdev config based on soc config
4816 	 */
4817 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
4818 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
4819 					 (nss_cfg & (1 << pdev_id)));
4820 
4821 	pdev->soc = soc;
4822 	pdev->pdev_id = pdev_id;
4823 	soc->pdev_list[pdev_id] = pdev;
4824 
4825 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
4826 	soc->pdev_count++;
4827 
4828 	/* Allocate memory for pdev srng rings */
4829 	if (dp_pdev_srng_alloc(pdev)) {
4830 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
4831 		goto fail2;
4832 	}
4833 
4834 	/* Rx specific init */
4835 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
4836 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
4837 		goto fail3;
4838 	}
4839 
4840 	if (dp_monitor_pdev_attach(pdev)) {
4841 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
4842 		goto fail4;
4843 	}
4844 
4845 	return QDF_STATUS_SUCCESS;
4846 fail4:
4847 	dp_rx_pdev_desc_pool_free(pdev);
4848 fail3:
4849 	dp_pdev_srng_free(pdev);
4850 fail2:
4851 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4852 fail1:
4853 	soc->pdev_list[pdev_id] = NULL;
4854 	qdf_mem_free(pdev);
4855 fail0:
4856 	return QDF_STATUS_E_FAILURE;
4857 }
4858 
4859 /*
4860  * dp_rxdma_ring_cleanup() - configure the RX DMA rings
4861  * @soc: data path SoC handle
4862  * @pdev: Physical device handle
4863  *
4864  * Return: void
4865  */
4866 #ifdef QCA_HOST2FW_RXBUF_RING
4867 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4868 {
4869 	int i;
4870 
4871 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4872 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4873 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4874 	}
4875 
4876 	dp_monitor_reap_timer_deinit(soc);
4877 }
4878 #else
4879 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4880 {
4881 	if (soc->lmac_timer_init) {
4882 		qdf_timer_stop(&soc->lmac_reap_timer);
4883 		qdf_timer_free(&soc->lmac_reap_timer);
4884 		soc->lmac_timer_init = 0;
4885 	}
4886 }
4887 #endif
4888 
#ifdef WLAN_DP_PENDING_MEM_FLUSH
/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers,
 * some peer/vdev leak case like Non-SSR + peer unmap missing
 * will be covered here.
 *
 * Return: None
 */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;

	/*
	 * NOTE(review): this emptiness check runs without the list lock —
	 * presumably acceptable as a racy fast path since the loop below
	 * re-checks under the lock; confirm.
	 */
	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	while (true) {
		/* Find one inactive vdev belonging to this pdev, under lock */
		qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
		TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
			      inactive_list_elem) {
			if (vdev->pdev == pdev)
				break;
		}
		qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

		/* vdev will be freed when all peers get cleanup */
		if (vdev)
			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
		else
			break;
	}
}
#else
/* Pending-memory flush compiled out: no-op */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
}
#endif
4929 
4930 /**
4931  * dp_pdev_deinit() - Deinit txrx pdev
4932  * @txrx_pdev: Datapath PDEV handle
4933  * @force: Force deinit
4934  *
4935  * Return: None
4936  */
4937 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4938 {
4939 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4940 	qdf_nbuf_t curr_nbuf, next_nbuf;
4941 
4942 	if (pdev->pdev_deinit)
4943 		return;
4944 
4945 	dp_tx_me_exit(pdev);
4946 	dp_rx_fst_detach(pdev->soc, pdev);
4947 	dp_rx_pdev_buffers_free(pdev);
4948 	dp_rx_pdev_desc_pool_deinit(pdev);
4949 	dp_pdev_bkp_stats_detach(pdev);
4950 	qdf_event_destroy(&pdev->fw_peer_stats_event);
4951 	if (pdev->sojourn_buf)
4952 		qdf_nbuf_free(pdev->sojourn_buf);
4953 
4954 	dp_pdev_flush_pending_vdevs(pdev);
4955 	dp_tx_desc_flush(pdev, NULL, true);
4956 
4957 	qdf_spinlock_destroy(&pdev->tx_mutex);
4958 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4959 
4960 	if (pdev->invalid_peer)
4961 		qdf_mem_free(pdev->invalid_peer);
4962 
4963 	dp_monitor_pdev_deinit(pdev);
4964 
4965 	dp_pdev_srng_deinit(pdev);
4966 
4967 	dp_ipa_uc_detach(pdev->soc, pdev);
4968 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
4969 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
4970 
4971 	curr_nbuf = pdev->invalid_peer_head_msdu;
4972 	while (curr_nbuf) {
4973 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4974 		qdf_nbuf_free(curr_nbuf);
4975 		curr_nbuf = next_nbuf;
4976 	}
4977 	pdev->invalid_peer_head_msdu = NULL;
4978 	pdev->invalid_peer_tail_msdu = NULL;
4979 
4980 	dp_wdi_event_detach(pdev);
4981 	pdev->pdev_deinit = 1;
4982 }
4983 
4984 /**
4985  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4986  * @psoc: Datapath psoc handle
4987  * @pdev_id: Id of datapath PDEV handle
4988  * @force: Force deinit
4989  *
4990  * Return: QDF_STATUS
4991  */
4992 static QDF_STATUS
4993 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4994 		     int force)
4995 {
4996 	struct dp_pdev *txrx_pdev;
4997 
4998 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4999 						       pdev_id);
5000 
5001 	if (!txrx_pdev)
5002 		return QDF_STATUS_E_FAILURE;
5003 
5004 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5005 
5006 	return QDF_STATUS_SUCCESS;
5007 }
5008 
5009 /*
5010  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5011  * @txrx_pdev: Datapath PDEV handle
5012  *
5013  * Return: None
5014  */
5015 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5016 {
5017 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5018 
5019 	dp_monitor_tx_capture_debugfs_init(pdev);
5020 
5021 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5022 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5023 	}
5024 }
5025 
5026 /*
5027  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
5028  * @psoc: Datapath soc handle
5029  * @pdev_id: pdev id of pdev
5030  *
5031  * Return: QDF_STATUS
5032  */
5033 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5034 				     uint8_t pdev_id)
5035 {
5036 	struct dp_pdev *pdev;
5037 
5038 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5039 						  pdev_id);
5040 
5041 	if (!pdev) {
5042 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5043 			    (struct dp_soc *)soc, pdev_id);
5044 		return QDF_STATUS_E_FAILURE;
5045 	}
5046 
5047 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5048 	return QDF_STATUS_SUCCESS;
5049 }
5050 
5051 /*
5052  * dp_pdev_detach() - Complete rest of pdev detach
5053  * @txrx_pdev: Datapath PDEV handle
5054  * @force: Force deinit
5055  *
5056  * Return: None
5057  */
5058 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5059 {
5060 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5061 	struct dp_soc *soc = pdev->soc;
5062 
5063 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5064 	dp_rx_pdev_desc_pool_free(pdev);
5065 	dp_monitor_pdev_detach(pdev);
5066 	dp_pdev_srng_free(pdev);
5067 
5068 	soc->pdev_count--;
5069 	soc->pdev_list[pdev->pdev_id] = NULL;
5070 
5071 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5072 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5073 			     WLAN_MD_DP_PDEV, "dp_pdev");
5074 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5075 }
5076 
5077 /*
5078  * dp_pdev_detach_wifi3() - detach txrx pdev
5079  * @psoc: Datapath soc handle
5080  * @pdev_id: pdev id of pdev
5081  * @force: Force detach
5082  *
5083  * Return: QDF_STATUS
5084  */
5085 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5086 				       int force)
5087 {
5088 	struct dp_pdev *pdev;
5089 
5090 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5091 						  pdev_id);
5092 
5093 	if (!pdev) {
5094 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5095 			    (struct dp_soc *)psoc, pdev_id);
5096 		return QDF_STATUS_E_FAILURE;
5097 	}
5098 
5099 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5100 	return QDF_STATUS_SUCCESS;
5101 }
5102 
5103 /*
5104  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
5105  * @soc: DP SOC handle
5106  */
5107 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5108 {
5109 	struct reo_desc_list_node *desc;
5110 	struct dp_rx_tid *rx_tid;
5111 
5112 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5113 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5114 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5115 		rx_tid = &desc->rx_tid;
5116 		qdf_mem_unmap_nbytes_single(soc->osdev,
5117 			rx_tid->hw_qdesc_paddr,
5118 			QDF_DMA_BIDIRECTIONAL,
5119 			rx_tid->hw_qdesc_alloc_size);
5120 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5121 		qdf_mem_free(desc);
5122 	}
5123 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5124 	qdf_list_destroy(&soc->reo_desc_freelist);
5125 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5126 }
5127 
5128 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
5129 /*
5130  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
5131  *                                          for deferred reo desc list
5132  * @psoc: Datapath soc handle
5133  *
5134  * Return: void
5135  */
5136 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5137 {
5138 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
5139 	qdf_list_create(&soc->reo_desc_deferred_freelist,
5140 			REO_DESC_DEFERRED_FREELIST_SIZE);
5141 	soc->reo_desc_deferred_freelist_init = true;
5142 }
5143 
5144 /*
5145  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5146  *                                           free the leftover REO QDESCs
5147  * @psoc: Datapath soc handle
5148  *
5149  * Return: void
5150  */
5151 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5152 {
5153 	struct reo_desc_deferred_freelist_node *desc;
5154 
5155 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5156 	soc->reo_desc_deferred_freelist_init = false;
5157 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5158 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5159 		qdf_mem_unmap_nbytes_single(soc->osdev,
5160 					    desc->hw_qdesc_paddr,
5161 					    QDF_DMA_BIDIRECTIONAL,
5162 					    desc->hw_qdesc_alloc_size);
5163 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5164 		qdf_mem_free(desc);
5165 	}
5166 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5167 
5168 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5169 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5170 }
5171 #else
/* Stub: deferred REO qdesc destroy feature not compiled in */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}
5175 
/* Stub: deferred REO qdesc destroy feature not compiled in */
static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
5179 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
5180 
5181 /*
5182  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5183  * @soc: DP SOC handle
5184  *
5185  */
5186 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5187 {
5188 	uint32_t i;
5189 
5190 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5191 		soc->tx_ring_map[i] = 0;
5192 }
5193 
5194 /*
5195  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
5196  * @soc: DP SOC handle
5197  *
5198  */
5199 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
5200 {
5201 	struct dp_peer *peer = NULL;
5202 	struct dp_peer *tmp_peer = NULL;
5203 	struct dp_vdev *vdev = NULL;
5204 	struct dp_vdev *tmp_vdev = NULL;
5205 	int i = 0;
5206 	uint32_t count;
5207 
5208 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
5209 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
5210 		return;
5211 
5212 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
5213 			   inactive_list_elem, tmp_peer) {
5214 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5215 			count = qdf_atomic_read(&peer->mod_refs[i]);
5216 			if (count)
5217 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
5218 					       peer, i, count);
5219 		}
5220 	}
5221 
5222 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
5223 			   inactive_list_elem, tmp_vdev) {
5224 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5225 			count = qdf_atomic_read(&vdev->mod_refs[i]);
5226 			if (count)
5227 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
5228 					       vdev, i, count);
5229 		}
5230 	}
5231 	QDF_BUG(0);
5232 }
5233 
5234 /**
5235  * dp_soc_deinit() - Deinitialize txrx SOC
5236  * @txrx_soc: Opaque DP SOC handle
5237  *
5238  * Return: None
5239  */
5240 static void dp_soc_deinit(void *txrx_soc)
5241 {
5242 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5243 	struct htt_soc *htt_soc = soc->htt_handle;
5244 
5245 	qdf_atomic_set(&soc->cmn_init_done, 0);
5246 
5247 	soc->arch_ops.txrx_soc_deinit(soc);
5248 
5249 	/* free peer tables & AST tables allocated during peer_map_attach */
5250 	if (soc->peer_map_attach_success) {
5251 		dp_peer_find_detach(soc);
5252 		soc->peer_map_attach_success = FALSE;
5253 	}
5254 
5255 	qdf_flush_work(&soc->htt_stats.work);
5256 	qdf_disable_work(&soc->htt_stats.work);
5257 
5258 	qdf_spinlock_destroy(&soc->htt_stats.lock);
5259 
5260 	dp_soc_reset_txrx_ring_map(soc);
5261 
5262 	dp_reo_desc_freelist_destroy(soc);
5263 	dp_reo_desc_deferred_freelist_destroy(soc);
5264 
5265 	DEINIT_RX_HW_STATS_LOCK(soc);
5266 
5267 	qdf_spinlock_destroy(&soc->ast_lock);
5268 
5269 	dp_peer_mec_spinlock_destroy(soc);
5270 
5271 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
5272 
5273 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
5274 
5275 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
5276 
5277 	qdf_spinlock_destroy(&soc->vdev_map_lock);
5278 
5279 	dp_reo_cmdlist_destroy(soc);
5280 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
5281 
5282 	dp_soc_tx_desc_sw_pools_deinit(soc);
5283 
5284 	dp_soc_srng_deinit(soc);
5285 
5286 	dp_hw_link_desc_ring_deinit(soc);
5287 
5288 	dp_soc_print_inactive_objects(soc);
5289 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
5290 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
5291 
5292 	htt_soc_htc_dealloc(soc->htt_handle);
5293 
5294 	htt_soc_detach(htt_soc);
5295 
5296 	/* Free wbm sg list and reset flags in down path */
5297 	dp_rx_wbm_sg_list_deinit(soc);
5298 
5299 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
5300 			     WLAN_MD_DP_SOC, "dp_soc");
5301 }
5302 
5303 /**
5304  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
5305  * @txrx_soc: Opaque DP SOC handle
5306  *
5307  * Return: None
5308  */
5309 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
5310 {
5311 	dp_soc_deinit(txrx_soc);
5312 }
5313 
5314 /*
5315  * dp_soc_detach() - Detach rest of txrx SOC
5316  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5317  *
5318  * Return: None
5319  */
5320 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
5321 {
5322 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5323 
5324 	soc->arch_ops.txrx_soc_detach(soc);
5325 
5326 	dp_soc_swlm_detach(soc);
5327 	dp_soc_tx_desc_sw_pools_free(soc);
5328 	dp_soc_srng_free(soc);
5329 	dp_hw_link_desc_ring_free(soc);
5330 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
5331 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
5332 	dp_soc_tx_hw_desc_history_detach(soc);
5333 	dp_soc_tx_history_detach(soc);
5334 	dp_soc_rx_history_detach(soc);
5335 
5336 	if (!dp_monitor_modularized_enable()) {
5337 		dp_mon_soc_detach_wrapper(soc);
5338 	}
5339 
5340 	qdf_mem_free(soc);
5341 }
5342 
5343 /*
5344  * dp_soc_detach_wifi3() - Detach txrx SOC
5345  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5346  *
5347  * Return: None
5348  */
5349 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
5350 {
5351 	dp_soc_detach(txrx_soc);
5352 }
5353 
5354 /*
5355  * dp_rxdma_ring_config() - configure the RX DMA rings
5356  *
5357  * This function is used to configure the MAC rings.
5358  * On MCL host provides buffers in Host2FW ring
5359  * FW refills (copies) buffers to the ring and updates
5360  * ring_idx in register
5361  *
5362  * @soc: data path SoC handle
5363  *
5364  * Return: zero on success, non-zero on failure
5365  */
5366 #ifdef QCA_HOST2FW_RXBUF_RING
/* MCL (QCA_HOST2FW_RXBUF_RING) variant: host fills Host2FW rings */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Primary RX refill ring for this pdev's lmac */
			htt_srng_setup(soc->htt_handle, 0,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Optional second refill ring, if allocated */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			/* Query control path for DBS (2x2 dual-band) */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(
							(void *)soc->ctrl_psoc);
			}

			/* Without DBS only a single MAC ring is used */
			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				FL("DBS enabled max_mac_rings %d"),
					 max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					 FL("pdev_id %d max_mac_rings %d"),
					 pdev->pdev_id, max_mac_rings);

			/* Set up per-MAC buf/dst rings and monitor rings */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_err_dst_ring[lmac_id]
					.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	dp_monitor_vdev_timer_init(soc);
	return status;
}
5462 #else
5463 /* This is only for WIN */
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN, pdev id maps 1:1 to the MAC id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rx_refill_buf_ring[lmac_id].
			       hal_srng, RXDMA_BUF);
		/* Configure monitor mode rings */
		/* NOTE(review): return status is ignored here, unlike the
		 * QCA_HOST2FW_RXBUF_RING variant which fails the whole
		 * config on monitor srng setup error — confirm best-effort
		 * is intended for the WIN path.
		 */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
	return status;
}
5506 #endif
5507 
5508 /*
5509  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
5510  *
5511  * This function is used to configure the FSE HW block in RX OLE on a
5512  * per pdev basis. Here, we will be programming parameters related to
5513  * the Flow Search Table.
5514  *
5515  * @soc: data path SoC handle
5516  *
5517  * Return: zero on success, non-zero on failure
5518  */
5519 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5520 static QDF_STATUS
5521 dp_rx_target_fst_config(struct dp_soc *soc)
5522 {
5523 	int i;
5524 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5525 
5526 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5527 		struct dp_pdev *pdev = soc->pdev_list[i];
5528 
5529 		/* Flow search is not enabled if NSS offload is enabled */
5530 		if (pdev &&
5531 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5532 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
5533 			if (status != QDF_STATUS_SUCCESS)
5534 				break;
5535 		}
5536 	}
5537 	return status;
5538 }
5539 #elif defined(WLAN_SUPPORT_RX_FISA)
5540 /**
5541  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
5542  * @soc: SoC handle
5543  *
5544  * Return: Success
5545  */
5546 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5547 {
5548 	/* Check if it is enabled in the INI */
5549 	if (!soc->fisa_enable) {
5550 		dp_err("RX FISA feature is disabled");
5551 		return QDF_STATUS_E_NOSUPPORT;
5552 	}
5553 
5554 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
5555 }
5556 
5557 #define FISA_MAX_TIMEOUT 0xffffffff
5558 #define FISA_DISABLE_TIMEOUT 0
5559 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5560 {
5561 	struct dp_htt_rx_fisa_cfg fisa_config;
5562 
5563 	fisa_config.pdev_id = 0;
5564 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
5565 
5566 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
5567 }
5568 #else /* !WLAN_SUPPORT_RX_FISA */
/* Stub: no flow tag/FISA support, nothing to program in the target */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
5573 #endif /* !WLAN_SUPPORT_RX_FISA */
5574 
5575 #ifndef WLAN_SUPPORT_RX_FISA
/* Stub: FISA not compiled in, report success so attach continues */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
5580 
/* Stub: FISA not compiled in, no stats to dump */
static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
5585 
/* Stub: FISA not compiled in, no flow table to dump */
static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
5589 
/* Stub: FISA not compiled in, no FSE cache flush to suspend */
static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}
5593 
/* Stub: FISA not compiled in, no FSE cache flush to resume */
static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
5597 #endif /* !WLAN_SUPPORT_RX_FISA */
5598 
5599 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* Stub: SW latency manager not compiled in, no stats to print */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
5604 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
5605 
5606 /*
5607  * dp_soc_attach_target_wifi3() - SOC initialization in the target
5608  * @cdp_soc: Opaque Datapath SOC handle
5609  *
5610  * Return: zero on success, non-zero on failure
5611  */
5612 static QDF_STATUS
5613 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
5614 {
5615 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5616 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5617 
5618 	htt_soc_attach_target(soc->htt_handle);
5619 
5620 	status = dp_rxdma_ring_config(soc);
5621 	if (status != QDF_STATUS_SUCCESS) {
5622 		dp_err("Failed to send htt srng setup messages to target");
5623 		return status;
5624 	}
5625 
5626 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
5627 	if (status != QDF_STATUS_SUCCESS) {
5628 		dp_err("Failed to send htt ring config message to target");
5629 		return status;
5630 	}
5631 
5632 	status = dp_rx_target_fst_config(soc);
5633 	if (status != QDF_STATUS_SUCCESS &&
5634 	    status != QDF_STATUS_E_NOSUPPORT) {
5635 		dp_err("Failed to send htt fst setup config message to target");
5636 		return status;
5637 	}
5638 
5639 	if (status == QDF_STATUS_SUCCESS) {
5640 		status = dp_rx_fisa_config(soc);
5641 		if (status != QDF_STATUS_SUCCESS) {
5642 			dp_err("Failed to send htt FISA config message to target");
5643 			return status;
5644 		}
5645 	}
5646 
5647 	DP_STATS_INIT(soc);
5648 
5649 	dp_runtime_init(soc);
5650 
5651 	/* initialize work queue for stats processing */
5652 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
5653 
5654 	return QDF_STATUS_SUCCESS;
5655 }
5656 
5657 /*
5658  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
5659  * @soc: SoC handle
5660  * @vdev: vdev handle
5661  * @vdev_id: vdev_id
5662  *
5663  * Return: None
5664  */
5665 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
5666 				   struct dp_vdev *vdev,
5667 				   uint8_t vdev_id)
5668 {
5669 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
5670 
5671 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5672 
5673 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5674 			QDF_STATUS_SUCCESS) {
5675 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
5676 			     soc, vdev, vdev_id);
5677 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
5678 		return;
5679 	}
5680 
5681 	if (!soc->vdev_id_map[vdev_id])
5682 		soc->vdev_id_map[vdev_id] = vdev;
5683 	else
5684 		QDF_ASSERT(0);
5685 
5686 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5687 }
5688 
5689 /*
5690  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
5691  * @soc: SoC handle
5692  * @vdev: vdev handle
5693  *
5694  * Return: None
5695  */
5696 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
5697 				      struct dp_vdev *vdev)
5698 {
5699 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5700 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
5701 
5702 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5703 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5704 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5705 }
5706 
5707 /*
5708  * dp_vdev_pdev_list_add() - add vdev into pdev's list
5709  * @soc: soc handle
5710  * @pdev: pdev handle
5711  * @vdev: vdev handle
5712  *
5713  * return: none
5714  */
5715 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
5716 				  struct dp_pdev *pdev,
5717 				  struct dp_vdev *vdev)
5718 {
5719 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5720 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5721 			QDF_STATUS_SUCCESS) {
5722 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
5723 			     soc, vdev);
5724 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5725 		return;
5726 	}
5727 	/* add this vdev into the pdev's list */
5728 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5729 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5730 }
5731 
5732 /*
5733  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
5734  * @soc: SoC handle
5735  * @pdev: pdev handle
5736  * @vdev: VDEV handle
5737  *
5738  * Return: none
5739  */
5740 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
5741 				     struct dp_pdev *pdev,
5742 				     struct dp_vdev *vdev)
5743 {
5744 	uint8_t found = 0;
5745 	struct dp_vdev *tmpvdev = NULL;
5746 
5747 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5748 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
5749 		if (tmpvdev == vdev) {
5750 			found = 1;
5751 			break;
5752 		}
5753 	}
5754 
5755 	if (found) {
5756 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5757 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5758 	} else {
5759 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
5760 			      soc, vdev, pdev, &pdev->vdev_list);
5761 		QDF_ASSERT(0);
5762 	}
5763 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5764 }
5765 
5766 /*
5767 * dp_vdev_attach_wifi3() - attach txrx vdev
5768 * @txrx_pdev: Datapath PDEV handle
5769 * @vdev_mac_addr: MAC address of the virtual interface
5770 * @vdev_id: VDEV Id
5771 * @wlan_op_mode: VDEV operating mode
5772 * @subtype: VDEV operating subtype
5773 *
5774 * Return: status
5775 */
5776 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
5777 				       uint8_t pdev_id,
5778 				       uint8_t *vdev_mac_addr,
5779 				       uint8_t vdev_id,
5780 				       enum wlan_op_mode op_mode,
5781 				       enum wlan_op_subtype subtype)
5782 {
5783 	int i = 0;
5784 	qdf_size_t vdev_context_size;
5785 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5786 	struct dp_pdev *pdev =
5787 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5788 						   pdev_id);
5789 	struct dp_vdev *vdev;
5790 
5791 	vdev_context_size =
5792 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
5793 	vdev = qdf_mem_malloc(vdev_context_size);
5794 
5795 	if (!pdev) {
5796 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5797 			    cdp_soc, pdev_id);
5798 		qdf_mem_free(vdev);
5799 		goto fail0;
5800 	}
5801 
5802 	if (!vdev) {
5803 		dp_init_err("%pK: DP VDEV memory allocation failed",
5804 			    cdp_soc);
5805 		goto fail0;
5806 	}
5807 
5808 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
5809 			  WLAN_MD_DP_VDEV, "dp_vdev");
5810 
5811 	vdev->pdev = pdev;
5812 	vdev->vdev_id = vdev_id;
5813 	vdev->opmode = op_mode;
5814 	vdev->subtype = subtype;
5815 	vdev->osdev = soc->osdev;
5816 
5817 	vdev->osif_rx = NULL;
5818 	vdev->osif_rsim_rx_decap = NULL;
5819 	vdev->osif_get_key = NULL;
5820 	vdev->osif_tx_free_ext = NULL;
5821 	vdev->osif_vdev = NULL;
5822 
5823 	vdev->delete.pending = 0;
5824 	vdev->safemode = 0;
5825 	vdev->drop_unenc = 1;
5826 	vdev->sec_type = cdp_sec_type_none;
5827 	vdev->multipass_en = false;
5828 	qdf_atomic_init(&vdev->ref_cnt);
5829 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5830 		qdf_atomic_init(&vdev->mod_refs[i]);
5831 
5832 	/* Take one reference for create*/
5833 	qdf_atomic_inc(&vdev->ref_cnt);
5834 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
5835 	vdev->num_peers = 0;
5836 #ifdef notyet
5837 	vdev->filters_num = 0;
5838 #endif
5839 	vdev->lmac_id = pdev->lmac_id;
5840 
5841 	qdf_mem_copy(
5842 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
5843 
5844 	/* TODO: Initialize default HTT meta data that will be used in
5845 	 * TCL descriptors for packets transmitted from this VDEV
5846 	 */
5847 
5848 	qdf_spinlock_create(&vdev->peer_list_lock);
5849 	TAILQ_INIT(&vdev->peer_list);
5850 	dp_peer_multipass_list_init(vdev);
5851 	if ((soc->intr_mode == DP_INTR_POLL) &&
5852 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
5853 		if ((pdev->vdev_count == 0) ||
5854 		    (wlan_op_mode_monitor == vdev->opmode))
5855 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
5856 	} else if (soc->intr_mode == DP_INTR_MSI &&
5857 		   wlan_op_mode_monitor == vdev->opmode) {
5858 		dp_monitor_vdev_timer_start(soc);
5859 	}
5860 
5861 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
5862 
5863 	if (wlan_op_mode_monitor == vdev->opmode) {
5864 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
5865 			dp_monitor_pdev_set_mon_vdev(vdev);
5866 			dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
5867 			return QDF_STATUS_SUCCESS;
5868 		}
5869 		return QDF_STATUS_E_FAILURE;
5870 	}
5871 
5872 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5873 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5874 	vdev->dscp_tid_map_id = 0;
5875 	vdev->mcast_enhancement_en = 0;
5876 	vdev->igmp_mcast_enhanc_en = 0;
5877 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
5878 	vdev->prev_tx_enq_tstamp = 0;
5879 	vdev->prev_rx_deliver_tstamp = 0;
5880 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
5881 
5882 	dp_vdev_pdev_list_add(soc, pdev, vdev);
5883 	pdev->vdev_count++;
5884 
5885 	if (wlan_op_mode_sta != vdev->opmode)
5886 		vdev->ap_bridge_enabled = true;
5887 	else
5888 		vdev->ap_bridge_enabled = false;
5889 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
5890 		     cdp_soc, vdev->ap_bridge_enabled);
5891 
5892 	dp_tx_vdev_attach(vdev);
5893 
5894 	dp_monitor_vdev_attach(vdev);
5895 	if (!pdev->is_lro_hash_configured) {
5896 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
5897 			pdev->is_lro_hash_configured = true;
5898 		else
5899 			dp_err("LRO hash setup failure!");
5900 	}
5901 
5902 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
5903 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
5904 	DP_STATS_INIT(vdev);
5905 
5906 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
5907 		goto fail0;
5908 
5909 	if (wlan_op_mode_sta == vdev->opmode)
5910 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
5911 				     vdev->mac_addr.raw);
5912 	return QDF_STATUS_SUCCESS;
5913 
5914 fail0:
5915 	return QDF_STATUS_E_FAILURE;
5916 }
5917 
5918 #ifndef QCA_HOST_MODE_WIFI_DISABLED
5919 /**
5920  * dp_vdev_register_tx_handler() - Register Tx handler
5921  * @vdev: struct dp_vdev *
5922  * @soc: struct dp_soc *
5923  * @txrx_ops: struct ol_txrx_ops *
5924  */
5925 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
5926 					       struct dp_soc *soc,
5927 					       struct ol_txrx_ops *txrx_ops)
5928 {
5929 	/* Enable vdev_id check only for ap, if flag is enabled */
5930 	if (vdev->mesh_vdev)
5931 		txrx_ops->tx.tx = dp_tx_send_mesh;
5932 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
5933 		 (vdev->opmode == wlan_op_mode_ap))
5934 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
5935 	else
5936 		txrx_ops->tx.tx = dp_tx_send;
5937 
5938 	/* Avoid check in regular exception Path */
5939 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
5940 	    (vdev->opmode == wlan_op_mode_ap))
5941 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
5942 	else
5943 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
5944 
5945 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
5946 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
5947 		vdev->opmode, vdev->vdev_id);
5948 }
5949 #else /* QCA_HOST_MODE_WIFI_DISABLED */
/* Stub: host-mode WiFi TX is disabled, no TX handlers to register */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}
5955 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
5956 
5957 /**
5958  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5959  * @soc: Datapath soc handle
5960  * @vdev_id: id of Datapath VDEV handle
5961  * @osif_vdev: OSIF vdev handle
5962  * @txrx_ops: Tx and Rx operations
5963  *
5964  * Return: DP VDEV handle on success, NULL on failure
5965  */
5966 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
5967 					 uint8_t vdev_id,
5968 					 ol_osif_vdev_handle osif_vdev,
5969 					 struct ol_txrx_ops *txrx_ops)
5970 {
5971 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5972 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
5973 						      DP_MOD_ID_CDP);
5974 
5975 	if (!vdev)
5976 		return QDF_STATUS_E_FAILURE;
5977 
5978 	vdev->osif_vdev = osif_vdev;
5979 	vdev->osif_rx = txrx_ops->rx.rx;
5980 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
5981 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
5982 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
5983 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
5984 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
5985 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
5986 	vdev->osif_get_key = txrx_ops->get_key;
5987 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
5988 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
5989 	vdev->tx_comp = txrx_ops->tx.tx_comp;
5990 	vdev->stats_cb = txrx_ops->rx.stats_rx;
5991 #ifdef notyet
5992 #if ATH_SUPPORT_WAPI
5993 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
5994 #endif
5995 #endif
5996 #ifdef UMAC_SUPPORT_PROXY_ARP
5997 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
5998 #endif
5999 	vdev->me_convert = txrx_ops->me_convert;
6000 
6001 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
6002 
6003 	dp_init_info("%pK: DP Vdev Register success", soc);
6004 
6005 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6006 	return QDF_STATUS_SUCCESS;
6007 }
6008 
6009 /**
6010  * dp_peer_delete() - delete DP peer
6011  *
6012  * @soc: Datatpath soc
6013  * @peer: Datapath peer
6014  * @arg: argument to iter function
6015  *
6016  * Return: void
6017  */
6018 static void
6019 dp_peer_delete(struct dp_soc *soc,
6020 	       struct dp_peer *peer,
6021 	       void *arg)
6022 {
6023 	if (!peer->valid)
6024 		return;
6025 
6026 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6027 			     peer->vdev->vdev_id,
6028 			     peer->mac_addr.raw, 0);
6029 }
6030 
6031 /**
6032  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
6033  * @vdev: Datapath VDEV handle
6034  * @unmap_only: Flag to indicate "only unmap"
6035  *
6036  * Return: void
6037  */
6038 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6039 {
6040 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6041 	struct dp_pdev *pdev = vdev->pdev;
6042 	struct dp_soc *soc = pdev->soc;
6043 	struct dp_peer *peer;
6044 	uint32_t i = 0;
6045 
6046 
6047 	if (!unmap_only)
6048 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6049 					       DP_MOD_ID_CDP);
6050 
6051 	for (i = 0; i < soc->max_peers ; i++) {
6052 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6053 
6054 		if (!peer)
6055 			continue;
6056 
6057 		if (peer->vdev != vdev) {
6058 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6059 			continue;
6060 		}
6061 
6062 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap",
6063 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6064 
6065 		dp_rx_peer_unmap_handler(soc, i,
6066 					 vdev->vdev_id,
6067 					 peer->mac_addr.raw, 0,
6068 					 DP_PEER_WDS_COUNT_INVALID);
6069 		SET_PEER_REF_CNT_ONE(peer);
6070 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6071 	}
6072 
6073 }
6074 
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @cdp_soc: Datapath soc handle
 * @vdev_id: VDEV Id
 * @callback: Callback OL_IF on completion of detach
 * @cb_context:	Callback context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 * vdev cannot be found
 */
static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id,
				       ol_txrx_vdev_delete_cb callback,
				       void *cb_context)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_peer *vap_self_peer = NULL;
	/* takes a DP_MOD_ID_CDP reference; dropped near the end */
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	soc->arch_ops.txrx_vdev_detach(soc, vdev);

	pdev = vdev->pdev;

	/* tear down the self (VAP) peer and its self AST entry first */
	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
							DP_MOD_ID_CONFIG);
	if (vap_self_peer) {
		qdf_spin_lock_bh(&soc->ast_lock);
		if (vap_self_peer->self_ast_entry) {
			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
			vap_self_peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vap_self_peer->mac_addr.raw, 0);
		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
	}

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);

	/* indicate that the vdev needs to be deleted */
	vdev->delete.pending = 1;
	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	dp_vdev_id_map_tbl_remove(soc, vdev);

	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);

	dp_tx_vdev_multipass_deinit(vdev);

	if (vdev->vdev_dp_ext_handle) {
		qdf_mem_free(vdev->vdev_dp_ext_handle);
		vdev->vdev_dp_ext_handle = NULL;
	}
	/* saved so dp_vdev_unref_delete() can notify OL_IF on final free */
	vdev->delete.callback = callback;
	vdev->delete.context = cb_context;

	if (vdev->opmode != wlan_op_mode_monitor)
		dp_vdev_pdev_list_remove(soc, pdev, vdev);

	pdev->vdev_count--;
	/* release reference taken above for find */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	/* park on the inactive list; the vdev is freed (and the delete
	 * callback fired) when the last outstanding reference drops
	 */
	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	/* release reference taken at dp_vdev_create */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}
6164 
6165 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
6166 						uint8_t *peer_mac_addr)
6167 {
6168 	struct dp_peer *peer;
6169 	struct dp_soc *soc = vdev->pdev->soc;
6170 
6171 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6172 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
6173 		      inactive_list_elem) {
6174 
6175 		/* reuse bss peer only when vdev matches*/
6176 		if (peer->bss_peer && (peer->vdev == vdev) &&
6177 		    qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6178 				QDF_MAC_ADDR_SIZE) == 0) {
6179 			/* increment ref count for cdp_peer_create*/
6180 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
6181 						QDF_STATUS_SUCCESS) {
6182 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6183 					     inactive_list_elem);
6184 				qdf_spin_unlock_bh
6185 					(&soc->inactive_peer_list_lock);
6186 				return peer;
6187 			}
6188 		}
6189 	}
6190 
6191 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6192 	return NULL;
6193 }
6194 
#ifdef FEATURE_AST
/* On roam, drop a stale WDS AST entry left behind for @peer_mac_addr.
 * Only next_hop (WDS) entries not already being deleted are removed.
 */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	/* per-pdev lookup when AST override is supported, soc-wide otherwise */
	ast = soc->ast_override_support ?
		dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
						pdev->pdev_id) :
		dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	if (ast && ast->next_hop && !ast->delete_in_progress)
		dp_peer_del_ast(soc, ast);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
6215 
#ifdef PEER_CACHE_RX_PKTS
/* Initialize the per-peer cached-rx buffer queue: its lock, flush
 * threshold, and the backing list (both sized DP_RX_CACHED_BUFQ_THRESH).
 */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
}
#else
/* Rx packet caching compiled out: nothing to initialize */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
#endif
6228 
6229 /*
6230  * dp_peer_create_wifi3() - attach txrx peer
6231  * @soc_hdl: Datapath soc handle
6232  * @vdev_id: id of vdev
6233  * @peer_mac_addr: Peer MAC address
6234  *
6235  * Return: 0 on success, -1 on failure
6236  */
6237 static QDF_STATUS
6238 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6239 		     uint8_t *peer_mac_addr)
6240 {
6241 	struct dp_peer *peer;
6242 	int i;
6243 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6244 	struct dp_pdev *pdev;
6245 	struct cdp_peer_cookie peer_cookie;
6246 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
6247 	struct dp_vdev *vdev = NULL;
6248 
6249 	if (!peer_mac_addr)
6250 		return QDF_STATUS_E_FAILURE;
6251 
6252 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
6253 
6254 	if (!vdev)
6255 		return QDF_STATUS_E_FAILURE;
6256 
6257 	pdev = vdev->pdev;
6258 	soc = pdev->soc;
6259 
6260 	/*
6261 	 * If a peer entry with given MAC address already exists,
6262 	 * reuse the peer and reset the state of peer.
6263 	 */
6264 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
6265 
6266 	if (peer) {
6267 		dp_peer_vdev_list_add(soc, vdev, peer);
6268 
6269 		dp_peer_find_hash_add(soc, peer);
6270 		qdf_atomic_init(&peer->is_default_route_set);
6271 		dp_peer_cleanup(vdev, peer);
6272 
6273 		for (i = 0; i < DP_MAX_TIDS; i++)
6274 			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6275 
6276 		qdf_spin_lock_bh(&soc->ast_lock);
6277 		dp_peer_delete_ast_entries(soc, peer);
6278 		qdf_spin_unlock_bh(&soc->ast_lock);
6279 
6280 		if ((vdev->opmode == wlan_op_mode_sta) &&
6281 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6282 		     QDF_MAC_ADDR_SIZE)) {
6283 			ast_type = CDP_TXRX_AST_TYPE_SELF;
6284 		}
6285 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6286 
6287 		peer->valid = 1;
6288 		dp_local_peer_id_alloc(pdev, peer);
6289 
6290 		qdf_spinlock_create(&peer->peer_info_lock);
6291 		dp_peer_rx_bufq_resources_init(peer);
6292 
6293 		DP_STATS_INIT(peer);
6294 		DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6295 
6296 		/*
6297 		 * In tx_monitor mode, filter may be set for unassociated peer
6298 		 * when unassociated peer get associated peer need to
6299 		 * update tx_cap_enabled flag to support peer filter.
6300 		 */
6301 		dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6302 
6303 		dp_set_peer_isolation(peer, false);
6304 
6305 		dp_wds_ext_peer_init(peer);
6306 
6307 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6308 
6309 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6310 		return QDF_STATUS_SUCCESS;
6311 	} else {
6312 		/*
6313 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
6314 		 * need to remove the AST entry which was earlier added as a WDS
6315 		 * entry.
6316 		 * If an AST entry exists, but no peer entry exists with a given
6317 		 * MAC addresses, we could deduce it as a WDS entry
6318 		 */
6319 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
6320 	}
6321 
6322 #ifdef notyet
6323 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
6324 		soc->mempool_ol_ath_peer);
6325 #else
6326 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
6327 #endif
6328 	wlan_minidump_log(peer,
6329 			  sizeof(*peer),
6330 			  soc->ctrl_psoc,
6331 			  WLAN_MD_DP_PEER, "dp_peer");
6332 	if (!peer) {
6333 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6334 		return QDF_STATUS_E_FAILURE; /* failure */
6335 	}
6336 
6337 	qdf_mem_zero(peer, sizeof(struct dp_peer));
6338 
6339 	TAILQ_INIT(&peer->ast_entry_list);
6340 
6341 	/* store provided params */
6342 	peer->vdev = vdev;
6343 	/* get the vdev reference for new peer */
6344 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
6345 
6346 	if ((vdev->opmode == wlan_op_mode_sta) &&
6347 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6348 			 QDF_MAC_ADDR_SIZE)) {
6349 		ast_type = CDP_TXRX_AST_TYPE_SELF;
6350 	}
6351 	qdf_spinlock_create(&peer->peer_state_lock);
6352 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6353 	qdf_spinlock_create(&peer->peer_info_lock);
6354 	dp_wds_ext_peer_init(peer);
6355 
6356 	dp_peer_rx_bufq_resources_init(peer);
6357 
6358 	qdf_mem_copy(
6359 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
6360 
6361 	/* initialize the peer_id */
6362 	peer->peer_id = HTT_INVALID_PEER;
6363 
6364 	/* reset the ast index to flowid table */
6365 	dp_peer_reset_flowq_map(peer);
6366 
6367 	qdf_atomic_init(&peer->ref_cnt);
6368 
6369 	for (i = 0; i < DP_MOD_ID_MAX; i++)
6370 		qdf_atomic_init(&peer->mod_refs[i]);
6371 
6372 	/* keep one reference for attach */
6373 	qdf_atomic_inc(&peer->ref_cnt);
6374 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
6375 
6376 	dp_peer_vdev_list_add(soc, vdev, peer);
6377 
6378 	/* TODO: See if hash based search is required */
6379 	dp_peer_find_hash_add(soc, peer);
6380 
6381 	/* Initialize the peer state */
6382 	peer->state = OL_TXRX_PEER_STATE_DISC;
6383 
6384 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
6385 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
6386 		qdf_atomic_read(&peer->ref_cnt));
6387 	/*
6388 	 * For every peer MAp message search and set if bss_peer
6389 	 */
6390 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6391 			QDF_MAC_ADDR_SIZE) == 0 &&
6392 			(wlan_op_mode_sta != vdev->opmode)) {
6393 		dp_info("vdev bss_peer!!");
6394 		peer->bss_peer = 1;
6395 	}
6396 
6397 	if (wlan_op_mode_sta == vdev->opmode &&
6398 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6399 			QDF_MAC_ADDR_SIZE) == 0) {
6400 		peer->sta_self_peer = 1;
6401 	}
6402 
6403 	for (i = 0; i < DP_MAX_TIDS; i++)
6404 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6405 
6406 	peer->valid = 1;
6407 	dp_local_peer_id_alloc(pdev, peer);
6408 	DP_STATS_INIT(peer);
6409 	DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6410 
6411 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6412 		     QDF_MAC_ADDR_SIZE);
6413 	peer_cookie.ctx = NULL;
6414 	peer_cookie.pdev_id = pdev->pdev_id;
6415 	peer_cookie.cookie = pdev->next_peer_cookie++;
6416 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6417 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
6418 			     (void *)&peer_cookie,
6419 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
6420 #endif
6421 	if (soc->rdkstats_enabled) {
6422 		if (!peer_cookie.ctx) {
6423 			pdev->next_peer_cookie--;
6424 			qdf_err("Failed to initialize peer rate stats");
6425 		} else {
6426 			peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
6427 						peer_cookie.ctx;
6428 		}
6429 	}
6430 
6431 	/*
6432 	 * Allocate peer extended stats context. Fall through in
6433 	 * case of failure as its not an implicit requirement to have
6434 	 * this object for regular statistics updates.
6435 	 */
6436 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
6437 			QDF_STATUS_SUCCESS)
6438 		dp_warn("peer ext_stats ctx alloc failed");
6439 
6440 	if (dp_monitor_peer_attach(soc, peer) !=
6441 	    QDF_STATUS_SUCCESS)
6442 		dp_warn("peer monitor ctx alloc failed");
6443 
6444 	dp_set_peer_isolation(peer, false);
6445 
6446 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6447 
6448 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6449 
6450 	return QDF_STATUS_SUCCESS;
6451 }
6452 
6453 /*
6454  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
6455  * @vdev: Datapath VDEV handle
6456  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6457  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6458  *
6459  * Return: None
6460  */
6461 static
6462 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
6463 				  enum cdp_host_reo_dest_ring *reo_dest,
6464 				  bool *hash_based)
6465 {
6466 	struct dp_soc *soc;
6467 	struct dp_pdev *pdev;
6468 
6469 	pdev = vdev->pdev;
6470 	soc = pdev->soc;
6471 	/*
6472 	 * hash based steering is disabled for Radios which are offloaded
6473 	 * to NSS
6474 	 */
6475 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
6476 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
6477 
6478 	/*
6479 	 * Below line of code will ensure the proper reo_dest ring is chosen
6480 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
6481 	 */
6482 	*reo_dest = pdev->reo_dest;
6483 }
6484 
6485 #ifdef IPA_OFFLOAD
6486 /**
6487  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
6488  * @vdev: Virtual device
6489  *
6490  * Return: true if the vdev is of subtype P2P
6491  *	   false if the vdev is of any other subtype
6492  */
6493 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
6494 {
6495 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
6496 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
6497 	    vdev->subtype == wlan_op_subtype_p2p_go)
6498 		return true;
6499 
6500 	return false;
6501 }
6502 
6503 /*
6504  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
6505  * @vdev: Datapath VDEV handle
6506  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6507  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6508  *
6509  * If IPA is enabled in ini, for SAP mode, disable hash based
6510  * steering, use default reo_dst ring for RX. Use config values for other modes.
6511  * Return: None
6512  */
6513 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
6514 				       enum cdp_host_reo_dest_ring *reo_dest,
6515 				       bool *hash_based)
6516 {
6517 	struct dp_soc *soc;
6518 	struct dp_pdev *pdev;
6519 
6520 	pdev = vdev->pdev;
6521 	soc = pdev->soc;
6522 
6523 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
6524 
6525 	/* For P2P-GO interfaces we do not need to change the REO
6526 	 * configuration even if IPA config is enabled
6527 	 */
6528 	if (dp_is_vdev_subtype_p2p(vdev))
6529 		return;
6530 
6531 	/*
6532 	 * If IPA is enabled, disable hash-based flow steering and set
6533 	 * reo_dest_ring_4 as the REO ring to receive packets on.
6534 	 * IPA is configured to reap reo_dest_ring_4.
6535 	 *
6536 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
6537 	 * value enum value is from 1 - 4.
6538 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
6539 	 */
6540 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
6541 		if (vdev->opmode == wlan_op_mode_ap) {
6542 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6543 			*hash_based = 0;
6544 		} else if (vdev->opmode == wlan_op_mode_sta &&
6545 			   dp_ipa_is_mdm_platform()) {
6546 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6547 		}
6548 	}
6549 }
6550 
#else

/*
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Non-IPA build: simply use the system config (vdev default) values
 * for hash based steering and the reo destination ring.
 * Return: None
 */

static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
#endif /* IPA_OFFLOAD */
6570 
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @soc_hdl: soc handle object
 * @vdev_id : vdev_id of vdev object
 * @peer_mac: Peer's mac address
 *
 * Sets up default routing / REO destination for the peer and runs the
 * rx/tx-monitor peer init paths.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		    uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_vdev *vdev = NULL;
	/* takes a DP_MOD_ID_CDP reference; released at fail/exit */
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);
	enum wlan_op_mode vdev_opmode;

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	vdev = peer->vdev;
	if (!vdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	/* save vdev related member in case vdev freed */
	vdev_opmode = vdev->opmode;
	pdev = vdev->pdev;
	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);

	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	dp_monitor_peer_tx_init(pdev, peer);
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
				soc->ctrl_psoc,
				peer->vdev->pdev->pdev_id,
				peer->mac_addr.raw,
				peer->vdev->vdev_id, hash_based, reo_dest);
	}

	qdf_atomic_set(&peer->is_default_route_set, 1);

	if (vdev_opmode != wlan_op_mode_monitor)
		dp_peer_rx_init(pdev, peer);

	dp_peer_ppdu_delayed_ba_init(peer);

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
6645 
/*
 * dp_cp_peer_del_resp_handler - Handle the peer delete response
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of virtual device object
 * @mac_addr: Mac address of the peer
 *
 * Finalizes deletion of an unmapped AST entry whose delete was in
 * progress: removes it from the hash/table, then invokes its free
 * callback outside the ast_lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry =
		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
						vdev_id);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	if ((!ast_entry || !ast_entry->delete_in_progress) ||
	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* detach callback/cookie under the lock; call the cb after unlock */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);

	return QDF_STATUS_SUCCESS;
}
6705 
6706 /*
6707  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
6708  * @txrx_soc: cdp soc handle
6709  * @ac: Access category
6710  * @value: timeout value in millisec
6711  *
6712  * Return: void
6713  */
6714 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6715 				    uint8_t ac, uint32_t value)
6716 {
6717 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6718 
6719 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
6720 }
6721 
6722 /*
6723  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
6724  * @txrx_soc: cdp soc handle
6725  * @ac: access category
6726  * @value: timeout value in millisec
6727  *
6728  * Return: void
6729  */
6730 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6731 				    uint8_t ac, uint32_t *value)
6732 {
6733 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6734 
6735 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
6736 }
6737 
6738 /*
6739  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
6740  * @txrx_soc: cdp soc handle
6741  * @pdev_id: id of physical device object
6742  * @val: reo destination ring index (1 - 4)
6743  *
6744  * Return: QDF_STATUS
6745  */
6746 static QDF_STATUS
6747 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
6748 		     enum cdp_host_reo_dest_ring val)
6749 {
6750 	struct dp_pdev *pdev =
6751 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6752 						   pdev_id);
6753 
6754 	if (pdev) {
6755 		pdev->reo_dest = val;
6756 		return QDF_STATUS_SUCCESS;
6757 	}
6758 
6759 	return QDF_STATUS_E_FAILURE;
6760 }
6761 
6762 /*
6763  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
6764  * @txrx_soc: cdp soc handle
6765  * @pdev_id: id of physical device object
6766  *
6767  * Return: reo destination ring index
6768  */
6769 static enum cdp_host_reo_dest_ring
6770 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
6771 {
6772 	struct dp_pdev *pdev =
6773 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6774 						   pdev_id);
6775 
6776 	if (pdev)
6777 		return pdev->reo_dest;
6778 	else
6779 		return cdp_host_reo_dest_ring_unknown;
6780 }
6781 
6782 #ifdef WLAN_SUPPORT_SCS
6783 /*
6784  * dp_enable_scs_params - Enable/Disable SCS procedures
6785  * @soc - Datapath soc handle
6786  * @peer_mac - STA Mac address
6787  * @vdev_id - ID of the vdev handle
6788  * @active - Flag to set SCS active/inactive
6789  * return type - QDF_STATUS - Success/Invalid
6790  */
6791 static QDF_STATUS
6792 dp_enable_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
6793 		     *peer_mac,
6794 		     uint8_t vdev_id,
6795 		     bool is_active)
6796 {
6797 	struct dp_peer *peer;
6798 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6799 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6800 
6801 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
6802 				      DP_MOD_ID_CDP);
6803 
6804 	if (!peer) {
6805 		dp_err("Peer is NULL!");
6806 		goto fail;
6807 	}
6808 
6809 	peer->scs_is_active = is_active;
6810 	status = QDF_STATUS_SUCCESS;
6811 
6812 fail:
6813 	if (peer)
6814 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6815 	return status;
6816 }
6817 
6818 /*
6819  * @brief dp_copy_scs_params - SCS Parameters sent by STA
6820  * is copied from the cdp layer to the dp layer
6821  * These parameters are then used by the peer
6822  * for traffic classification.
6823  *
6824  * @param peer - peer struct
6825  * @param scs_params - cdp layer params
6826  * @idx - SCS_entry index obtained from the
6827  * node database with a given SCSID
6828  * @return void
6829  */
6830 void
6831 dp_copy_scs_params(struct dp_peer *peer,
6832 		   struct cdp_scs_params *scs_params,
6833 		   uint8_t idx)
6834 {
6835 	uint8_t tidx = 0;
6836 	uint8_t tclas_elem;
6837 
6838 	peer->scs[idx].scsid = scs_params->scsid;
6839 	peer->scs[idx].access_priority =
6840 		scs_params->access_priority;
6841 	peer->scs[idx].tclas_elements =
6842 		scs_params->tclas_elements;
6843 	peer->scs[idx].tclas_process =
6844 		scs_params->tclas_process;
6845 
6846 	tclas_elem = peer->scs[idx].tclas_elements;
6847 
6848 	while (tidx < tclas_elem) {
6849 		qdf_mem_copy(&peer->scs[idx].tclas[tidx],
6850 			     &scs_params->tclas[tidx],
6851 			     sizeof(struct cdp_tclas_tuple));
6852 		tidx++;
6853 	}
6854 }
6855 
6856 /*
6857  * @brief dp_record_scs_params() - Copying the SCS params to a
6858  * peer based database.
6859  *
6860  * @soc - Datapath soc handle
6861  * @peer_mac - STA Mac address
6862  * @vdev_id - ID of the vdev handle
6863  * @scs_params - Structure having SCS parameters obtained
6864  * from handshake
6865  * @idx - SCS_entry index obtained from the
6866  * node database with a given SCSID
6867  * @scs_sessions - Total # of SCS sessions active
6868  *
6869  * @details
6870  * SCS parameters sent by the STA in
6871  * the SCS Request to the AP. The AP makes a note of these
6872  * parameters while sending the MSDUs to the STA, to
6873  * send the downlink traffic with correct User priority.
6874  *
6875  * return type - QDF_STATUS - Success/Invalid
6876  */
6877 static QDF_STATUS
6878 dp_record_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
6879 		     *peer_mac,
6880 		     uint8_t vdev_id,
6881 		     struct cdp_scs_params *scs_params,
6882 		     uint8_t idx,
6883 		     uint8_t scs_sessions)
6884 {
6885 	struct dp_peer *peer;
6886 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6887 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6888 
6889 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
6890 				      DP_MOD_ID_CDP);
6891 
6892 	if (!peer) {
6893 		dp_err("Peer is NULL!");
6894 		goto fail;
6895 	}
6896 
6897 	if (idx >= IEEE80211_SCS_MAX_NO_OF_ELEM)
6898 		goto fail;
6899 
6900 	/* SCS procedure for the peer is activated
6901 	 * as soon as we get this information from
6902 	 * the control path, unless explicitly disabled.
6903 	 */
6904 	peer->scs_is_active = 1;
6905 	dp_copy_scs_params(peer, scs_params, idx);
6906 	status = QDF_STATUS_SUCCESS;
6907 	peer->no_of_scs_sessions = scs_sessions;
6908 
6909 fail:
6910 	if (peer)
6911 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6912 	return status;
6913 }
6914 #endif
6915 
#ifdef WLAN_SUPPORT_MSCS
/*
 * dp_record_mscs_params - MSCS parameters sent by the STA in
 * the MSCS Request to the AP. The AP makes a note of these
 * parameters while comparing the MSDUs sent by the STA, to
 * send the downlink traffic with correct User priority.
 * @soc_hdl - Datapath soc handle
 * @peer_mac - STA Mac address
 * @vdev_id - ID of the vdev handle
 * @mscs_params - Structure having MSCS parameters obtained
 * from handshake
 * @active - Flag to set MSCS active/inactive
 * return type - QDF_STATUS - Success/Invalid
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	/* takes a DP_MOD_ID_CDP reference; dropped at fail/exit */
	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("Peer is NULL!");
		goto fail;
	}
	if (!active) {
		dp_info("MSCS Procedure is terminated");
		peer->mscs_active = active;
		goto fail;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* Populate entries inside IPV4 database first */
		peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		/* Populate entries inside IPV6 database */
		peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;
		peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			peer->mscs_ipv4_parameter.user_priority_bitmap,
			peer->mscs_ipv4_parameter.user_priority_limit,
			peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
fail:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif
6986 
6987 /*
6988  * dp_get_sec_type() - Get the security type
6989  * @soc: soc handle
6990  * @vdev_id: id of dp handle
6991  * @peer_mac: mac of datapath PEER handle
6992  * @sec_idx:    Security id (mcast, ucast)
6993  *
6994  * return sec_type: Security type
6995  */
6996 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
6997 			   uint8_t *peer_mac, uint8_t sec_idx)
6998 {
6999 	int sec_type = 0;
7000 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7001 						       peer_mac, 0, vdev_id,
7002 						       DP_MOD_ID_CDP);
7003 
7004 	if (!peer) {
7005 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
7006 		return sec_type;
7007 	}
7008 
7009 	sec_type = peer->security[sec_idx].sec_type;
7010 
7011 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7012 	return sec_type;
7013 }
7014 
7015 /*
7016  * dp_peer_authorize() - authorize txrx peer
7017  * @soc: soc handle
7018  * @vdev_id: id of dp handle
7019  * @peer_mac: mac of datapath PEER handle
7020  * @authorize
7021  *
7022  */
7023 static QDF_STATUS
7024 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7025 		  uint8_t *peer_mac, uint32_t authorize)
7026 {
7027 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7028 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7029 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7030 						      0, vdev_id,
7031 						      DP_MOD_ID_CDP);
7032 
7033 	if (!peer) {
7034 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7035 		status = QDF_STATUS_E_FAILURE;
7036 	} else {
7037 		peer->authorize = authorize ? 1 : 0;
7038 
7039 		if (!peer->authorize)
7040 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
7041 
7042 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7043 	}
7044 
7045 	return status;
7046 }
7047 
/**
 * dp_vdev_unref_delete() - check and process vdev delete
 * @soc : DP specific soc pointer
 * @vdev: DP specific vdev pointer
 * @mod_id: module id
 *
 * Drops one module reference on the vdev. When the overall ref count
 * reaches zero, tears the vdev down: detaches tx/monitor state, unlinks
 * it from the soc's inactive vdev list, frees it, and finally invokes
 * the delete callback captured from vdev->delete.
 */
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;
	uint8_t vdev_id = vdev->vdev_id;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_vdev *tmp_vdev = NULL;
	uint8_t found = 0;

	/* Per-module count must not go negative; that means a double put */
	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);

	/* Return if this is not the last reference*/
	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
		return;

	/*
	 * This should be set as last reference need to released
	 * after cdp_vdev_detach() is called
	 *
	 * if this assert is hit there is a ref count issue
	 */
	QDF_ASSERT(vdev->delete.pending);

	/* Capture callback/context before the vdev memory is freed below */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));

	/* Monitor vdevs have no tx/flow state to unwind */
	if (wlan_op_mode_monitor == vdev->opmode) {
		dp_monitor_vdev_delete(soc, vdev);
		goto free_vdev;
	}

	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
			FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);
	dp_monitor_vdev_detach(vdev);

free_vdev:
	qdf_spinlock_destroy(&vdev->peer_list_lock);

	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (tmp_vdev == vdev) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
			     inactive_list_elem);
	/* vdev is now off the inactive vdev list (if it was on it) */
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
			     WLAN_MD_DP_VDEV, "dp_vdev");
	qdf_mem_free(vdev);
	vdev = NULL;

	/* Notify the owner (registered at detach time) that deletion is done */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}

qdf_export_symbol(dp_vdev_unref_delete);
7125 
/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle:    Datapath peer handle
 * @mod_id:         ID of module releasing reference
 *
 * Drops one reference; on the last reference the peer is fully torn down
 * (stats context, WDI destroy event, inactive list unlink, per-TID locks)
 * and the DP_MOD_ID_CHILD vdev reference taken at peer create is released.
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint16_t peer_id;
	struct cdp_peer_cookie peer_cookie;
	struct dp_peer *tmp_peer;
	bool found = false;
	int tid = 0;

	/* Per-module accounting is only kept for ids above DP_MOD_ID_RX */
	if (mod_id > DP_MOD_ID_RX)
		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		QDF_ASSERT(peer_id == HTT_INVALID_PEER);

		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		/*
		 * Deallocate the extended stats context
		 */
		dp_peer_ext_stats_ctx_dealloc(soc, peer);

		/* send peer destroy event to upper layer */
		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_cookie.ctx = NULL;
		peer_cookie.ctx = (struct cdp_stats_cookie *)
					peer->rdkstats_ctx;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
				     soc,
				     (void *)&peer_cookie,
				     peer->peer_id,
				     WDI_NO_VAL,
				     pdev->pdev_id);
#endif
		peer->rdkstats_ctx = NULL;
		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
				     WLAN_MD_DP_PEER, "dp_peer");

		/* Unlink from the soc's inactive peer list, if present */
		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
			      inactive_list_elem) {
			if (tmp_peer == peer) {
				found = 1;
				break;
			}
		}
		if (found)
			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
				     inactive_list_elem);
		/* delete this peer from the list */
		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);
		dp_monitor_peer_detach(soc, peer);

		/* Per-TID locks must be destroyed before freeing the peer */
		for (tid = 0; tid < DP_MAX_TIDS; tid++)
			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);

		qdf_spinlock_destroy(&peer->peer_state_lock);
		qdf_mem_free(peer);

		/*
		 * Decrement ref count taken at peer create
		 */
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
	}
}

qdf_export_symbol(dp_peer_unref_delete);
7225 
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_deinit() - release the peer's cached-rx bufq
 * @peer: DP peer handle
 *
 * Destroys the cached buffer queue and its spinlock; only compiled when
 * per-peer rx packet caching (PEER_CACHE_RX_PKTS) is enabled.
 */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
	qdf_list_destroy(&peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
/* No-op stub when peer rx packet caching is not compiled in */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
7237 
7238 /*
7239  * dp_peer_detach_wifi3() – Detach txrx peer
7240  * @soc_hdl: soc handle
7241  * @vdev_id: id of dp handle
7242  * @peer_mac: mac of datapath PEER handle
7243  * @bitmap: bitmap indicating special handling of request.
7244  *
7245  */
7246 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
7247 				       uint8_t vdev_id,
7248 				       uint8_t *peer_mac, uint32_t bitmap)
7249 {
7250 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7251 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7252 						      0, vdev_id,
7253 						      DP_MOD_ID_CDP);
7254 	struct dp_vdev *vdev = NULL;
7255 
7256 	/* Peer can be null for monitor vap mac address */
7257 	if (!peer) {
7258 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7259 			  "%s: Invalid peer\n", __func__);
7260 		return QDF_STATUS_E_FAILURE;
7261 	}
7262 
7263 	if (!peer->valid) {
7264 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7265 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
7266 			QDF_MAC_ADDR_REF(peer_mac));
7267 		return QDF_STATUS_E_ALREADY;
7268 	}
7269 
7270 	vdev = peer->vdev;
7271 
7272 	if (!vdev)
7273 		return QDF_STATUS_E_FAILURE;
7274 	peer->valid = 0;
7275 
7276 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
7277 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7278 
7279 	dp_local_peer_id_free(peer->vdev->pdev, peer);
7280 
7281 	/* Drop all rx packets before deleting peer */
7282 	dp_clear_peer_internal(soc, peer);
7283 
7284 	dp_peer_rx_bufq_resources_deinit(peer);
7285 
7286 	qdf_spinlock_destroy(&peer->peer_info_lock);
7287 	dp_peer_multipass_list_remove(peer);
7288 
7289 	/* remove the reference to the peer from the hash table */
7290 	dp_peer_find_hash_remove(soc, peer);
7291 
7292 	dp_peer_vdev_list_remove(soc, vdev, peer);
7293 
7294 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7295 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
7296 			  inactive_list_elem);
7297 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7298 
7299 	/*
7300 	 * Remove the reference added during peer_attach.
7301 	 * The peer will still be left allocated until the
7302 	 * PEER_UNMAP message arrives to remove the other
7303 	 * reference, added by the PEER_MAP message.
7304 	 */
7305 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
7306 	/*
7307 	 * Remove the reference taken above
7308 	 */
7309 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7310 
7311 	return QDF_STATUS_SUCCESS;
7312 }
7313 
/*
 * dp_get_vdev_mac_addr_wifi3() – Get the MAC address of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: MAC address on success, NULL on failure.
 *
 * NOTE(review): the returned pointer refers to storage owned by the vdev
 * and is returned after the local vdev reference is dropped — callers
 * presumably guarantee the vdev outlives their use of it; confirm.
 */
static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
					   uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	uint8_t *mac = NULL;

	if (!vdev)
		return NULL;

	/* Grab the pointer to vdev-owned storage before releasing the ref */
	mac = vdev->mac_addr.raw;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return mac;
}
7338 
/*
 * dp_vdev_set_wds() - Enable/disable WDS (wireless distribution system)
 *	on the vdev
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP VDEV handle
 * @val: non-zero to enable WDS, zero to disable
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *	vdev id cannot be resolved
 */
static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   uint32_t val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
				      DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->wds_enabled = val;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
7363 
7364 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
7365 {
7366 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7367 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7368 						     DP_MOD_ID_CDP);
7369 	int opmode;
7370 
7371 	if (!vdev) {
7372 		dp_err("vdev for id %d is NULL", vdev_id);
7373 		return -EINVAL;
7374 	}
7375 	opmode = vdev->opmode;
7376 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7377 
7378 	return opmode;
7379 }
7380 
7381 /**
7382  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
7383  * @soc_hdl: ol_txrx_soc_handle handle
7384  * @vdev_id: vdev id for which os rx handles are needed
7385  * @stack_fn_p: pointer to stack function pointer
7386  * @osif_handle_p: pointer to ol_osif_vdev_handle
7387  *
7388  * Return: void
7389  */
7390 static
7391 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
7392 					  uint8_t vdev_id,
7393 					  ol_txrx_rx_fp *stack_fn_p,
7394 					  ol_osif_vdev_handle *osif_vdev_p)
7395 {
7396 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7397 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7398 						     DP_MOD_ID_CDP);
7399 
7400 	if (qdf_unlikely(!vdev)) {
7401 		*stack_fn_p = NULL;
7402 		*osif_vdev_p = NULL;
7403 		return;
7404 	}
7405 	*stack_fn_p = vdev->osif_rx_stack;
7406 	*osif_vdev_p = vdev->osif_vdev;
7407 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7408 }
7409 
7410 /**
7411  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
7412  * @soc_hdl: datapath soc handle
7413  * @vdev_id: virtual device/interface id
7414  *
7415  * Return: Handle to control pdev
7416  */
7417 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
7418 						struct cdp_soc_t *soc_hdl,
7419 						uint8_t vdev_id)
7420 {
7421 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7422 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7423 						     DP_MOD_ID_CDP);
7424 	struct dp_pdev *pdev;
7425 
7426 	if (!vdev)
7427 		return NULL;
7428 
7429 	pdev = vdev->pdev;
7430 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7431 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
7432 }
7433 
7434 /**
7435  * dp_get_tx_pending() - read pending tx
7436  * @pdev_handle: Datapath PDEV handle
7437  *
7438  * Return: outstanding tx
7439  */
7440 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
7441 {
7442 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7443 
7444 	return qdf_atomic_read(&pdev->num_tx_outstanding);
7445 }
7446 
7447 /**
7448  * dp_get_peer_mac_from_peer_id() - get peer mac
7449  * @pdev_handle: Datapath PDEV handle
7450  * @peer_id: Peer ID
7451  * @peer_mac: MAC addr of PEER
7452  *
7453  * Return: QDF_STATUS
7454  */
7455 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
7456 					       uint32_t peer_id,
7457 					       uint8_t *peer_mac)
7458 {
7459 	struct dp_peer *peer;
7460 
7461 	if (soc && peer_mac) {
7462 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
7463 					     (uint16_t)peer_id,
7464 					     DP_MOD_ID_CDP);
7465 		if (peer) {
7466 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
7467 				     QDF_MAC_ADDR_SIZE);
7468 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7469 			return QDF_STATUS_SUCCESS;
7470 		}
7471 	}
7472 
7473 	return QDF_STATUS_E_FAILURE;
7474 }
7475 
7476 #ifdef MESH_MODE_SUPPORT
7477 static
7478 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
7479 {
7480 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7481 
7482 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
7483 	vdev->mesh_vdev = val;
7484 	if (val)
7485 		vdev->skip_sw_tid_classification |=
7486 			DP_TX_MESH_ENABLED;
7487 	else
7488 		vdev->skip_sw_tid_classification &=
7489 			~DP_TX_MESH_ENABLED;
7490 }
7491 
/*
 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	vdev->mesh_rx_filter = val;
}
7507 #endif
7508 
7509 /*
7510  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
7511  * @vdev_hdl: virtual device object
7512  * @val: value to be set
7513  *
7514  * Return: void
7515  */
7516 static
7517 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
7518 {
7519 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
7520 	if (val)
7521 		vdev->skip_sw_tid_classification |=
7522 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
7523 	else
7524 		vdev->skip_sw_tid_classification &=
7525 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
7526 }
7527 
7528 /*
7529  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
7530  * @vdev_hdl: virtual device object
7531  * @val: value to be set
7532  *
7533  * Return: 1 if this flag is set
7534  */
7535 static
7536 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
7537 {
7538 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7539 
7540 	return !!(vdev->skip_sw_tid_classification &
7541 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
7542 }
7543 
7544 #ifdef VDEV_PEER_PROTOCOL_COUNT
7545 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
7546 					       int8_t vdev_id,
7547 					       bool enable)
7548 {
7549 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7550 	struct dp_vdev *vdev;
7551 
7552 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7553 	if (!vdev)
7554 		return;
7555 
7556 	dp_info("enable %d vdev_id %d", enable, vdev_id);
7557 	vdev->peer_protocol_count_track = enable;
7558 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7559 }
7560 
7561 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7562 						   int8_t vdev_id,
7563 						   int drop_mask)
7564 {
7565 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7566 	struct dp_vdev *vdev;
7567 
7568 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7569 	if (!vdev)
7570 		return;
7571 
7572 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
7573 	vdev->peer_protocol_count_dropmask = drop_mask;
7574 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7575 }
7576 
7577 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
7578 						  int8_t vdev_id)
7579 {
7580 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7581 	struct dp_vdev *vdev;
7582 	int peer_protocol_count_track;
7583 
7584 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7585 	if (!vdev)
7586 		return 0;
7587 
7588 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
7589 		vdev_id);
7590 	peer_protocol_count_track =
7591 		vdev->peer_protocol_count_track;
7592 
7593 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7594 	return peer_protocol_count_track;
7595 }
7596 
7597 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7598 					       int8_t vdev_id)
7599 {
7600 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7601 	struct dp_vdev *vdev;
7602 	int peer_protocol_count_dropmask;
7603 
7604 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7605 	if (!vdev)
7606 		return 0;
7607 
7608 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
7609 		vdev_id);
7610 	peer_protocol_count_dropmask =
7611 		vdev->peer_protocol_count_dropmask;
7612 
7613 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7614 	return peer_protocol_count_dropmask;
7615 }
7616 
7617 #endif
7618 
7619 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7620 {
7621 	uint8_t pdev_count;
7622 
7623 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7624 		if (soc->pdev_list[pdev_count] &&
7625 		    soc->pdev_list[pdev_count] == data)
7626 			return true;
7627 	}
7628 	return false;
7629 }
7630 
7631 /**
7632  * dp_rx_bar_stats_cb(): BAR received stats callback
7633  * @soc: SOC handle
7634  * @cb_ctxt: Call back context
7635  * @reo_status: Reo status
7636  *
7637  * return: void
7638  */
7639 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7640 	union hal_reo_status *reo_status)
7641 {
7642 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7643 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7644 
7645 	if (!dp_check_pdev_exists(soc, pdev)) {
7646 		dp_err_rl("pdev doesn't exist");
7647 		return;
7648 	}
7649 
7650 	if (!qdf_atomic_read(&soc->cmn_init_done))
7651 		return;
7652 
7653 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7654 		DP_PRINT_STATS("REO stats failure %d",
7655 			       queue_status->header.status);
7656 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7657 		return;
7658 	}
7659 
7660 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7661 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7662 
7663 }
7664 
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: destination buffer for the consolidated stats
 *
 * Seeds @vdev_stats with the vdev's own counters and then folds in every
 * attached peer's stats; optionally forwards the result via WDI.
 *
 * return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats)
{
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	soc = vdev->pdev->soc;

	/* Start from the vdev's own counters ... */
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

	/* ... then accumulate each peer's stats into the same buffer */
	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     vdev_stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}
7692 
/*
 * dp_aggregate_pdev_stats() - consolidate stats at PDEV level
 * @pdev: DP pdev handle
 *
 * Clears the pdev's tx/rx/tx_i counters and rebuilds them by aggregating
 * every vdev (and transitively every peer) under the pdev. Uses an atomic
 * allocation for the scratch buffer since callers may hold locks.
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	/* Reset pdev aggregates before re-accumulating from vdevs */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		DP_UPDATE_STATS(pdev, pdev->invalid_peer);

	soc = pdev->soc;
	/* Hold the vdev list lock while walking all vdevs of this pdev */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
7729 
7730 /**
7731  * dp_vdev_getstats() - get vdev packet level stats
7732  * @vdev_handle: Datapath VDEV handle
7733  * @stats: cdp network device stats structure
7734  *
7735  * Return: QDF_STATUS
7736  */
7737 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7738 				   struct cdp_dev_stats *stats)
7739 {
7740 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7741 	struct dp_pdev *pdev;
7742 	struct dp_soc *soc;
7743 	struct cdp_vdev_stats *vdev_stats;
7744 
7745 	if (!vdev)
7746 		return QDF_STATUS_E_FAILURE;
7747 
7748 	pdev = vdev->pdev;
7749 	if (!pdev)
7750 		return QDF_STATUS_E_FAILURE;
7751 
7752 	soc = pdev->soc;
7753 
7754 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7755 
7756 	if (!vdev_stats) {
7757 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
7758 			   soc);
7759 		return QDF_STATUS_E_FAILURE;
7760 	}
7761 
7762 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7763 
7764 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7765 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7766 
7767 	stats->tx_errors = vdev_stats->tx.tx_failed +
7768 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7769 	stats->tx_dropped = stats->tx_errors;
7770 
7771 	stats->rx_packets = vdev_stats->rx.unicast.num +
7772 		vdev_stats->rx.multicast.num +
7773 		vdev_stats->rx.bcast.num;
7774 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7775 		vdev_stats->rx.multicast.bytes +
7776 		vdev_stats->rx.bcast.bytes;
7777 
7778 	qdf_mem_free(vdev_stats);
7779 
7780 	return QDF_STATUS_SUCCESS;
7781 }
7782 
/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Aggregates the pdev's stats and flattens them into the generic
 * cdp_dev_stats counters.
 *
 * Return: None
 */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
			     struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Refresh pdev aggregates from all vdevs/peers first */
	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed +
		pdev->stats.tx_i.dropped.dropped_pkt.num;
	stats->tx_dropped = stats->tx_errors;

	/* Rx packets/bytes combine unicast, multicast and broadcast */
	stats->rx_packets = pdev->stats.rx.unicast.num +
		pdev->stats.rx.multicast.num +
		pdev->stats.rx.bcast.num;
	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
		pdev->stats.rx.multicast.bytes +
		pdev->stats.rx.bcast.bytes;
	stats->rx_errors = pdev->stats.err.ip_csum_err +
		pdev->stats.err.tcp_udp_csum_err +
		pdev->stats.rx.err.mic_err +
		pdev->stats.rx.err.decrypt_err +
		pdev->stats.err.rxdma_error +
		pdev->stats.err.reo_error;
	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
		pdev->stats.dropped.mec +
		pdev->stats.dropped.mesh_filter +
		pdev->stats.dropped.wifi_parse +
		pdev->stats.dropped.mon_rx_drop +
		pdev->stats.dropped.mon_radiotap_update_err;
}
7823 
7824 /**
7825  * dp_get_device_stats() - get interface level packet stats
7826  * @soc: soc handle
7827  * @id : vdev_id or pdev_id based on type
7828  * @stats: cdp network device stats structure
7829  * @type: device type pdev/vdev
7830  *
7831  * Return: QDF_STATUS
7832  */
7833 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
7834 				      struct cdp_dev_stats *stats,
7835 				      uint8_t type)
7836 {
7837 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7838 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7839 	struct dp_vdev *vdev;
7840 
7841 	switch (type) {
7842 	case UPDATE_VDEV_STATS:
7843 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
7844 
7845 		if (vdev) {
7846 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
7847 						  stats);
7848 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7849 		}
7850 		return status;
7851 	case UPDATE_PDEV_STATS:
7852 		{
7853 			struct dp_pdev *pdev =
7854 				dp_get_pdev_from_soc_pdev_id_wifi3(
7855 						(struct dp_soc *)soc,
7856 						 id);
7857 			if (pdev) {
7858 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7859 						 stats);
7860 				return QDF_STATUS_SUCCESS;
7861 			}
7862 		}
7863 		break;
7864 	default:
7865 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7866 			"apstats cannot be updated for this input "
7867 			"type %d", type);
7868 		break;
7869 	}
7870 
7871 	return QDF_STATUS_E_FAILURE;
7872 }
7873 
/**
 * dp_srng_get_str_from_hal_ring_type() - map a HAL ring type to a
 *	human-readable name for logging and stats output
 * @ring_type: hal_ring_type enumerator
 *
 * Return: constant name string; "Invalid" for unknown ring types
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	case WBM_IDLE_LINK:
		return "WBM_hw_idle_link";
	default:
		dp_err("Invalid ring type");
		break;
	}
	return "Invalid";
}
7916 
/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 *
 * Thin wrapper: the HIF layer owns NAPI bookkeeping, so simply delegate.
 */
void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}
7925 
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_txrx_host_peer_ext_stats_clr: Reinitialize the txrx peer ext stats
 * @peer: DP peer whose extended stats are reset
 *
 * Zeroes the optional pext_stats block if it was allocated for this peer.
 */
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
{
	if (peer->pext_stats)
		qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats));
}
#else
/* No-op stub when peer extended stats are not compiled in */
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
{
}
#endif
7941 
/**
 * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function (unused)
 *
 * Peer-iterator callback: clears per-TID rx stats, the peer's own stats
 * and its extended stats, then publishes the (now cleared) stats via WDI.
 *
 * Return: QDF_STATUS
 */
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
			    struct dp_peer *peer,
			    void *arg)
{
	struct dp_rx_tid *rx_tid;
	uint8_t tid;

	/* Reset per-TID rx stats first */
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		DP_STATS_CLR(rx_tid);
	}

	DP_STATS_CLR(peer);

	/* Extended (delay) stats, when compiled in */
	dp_txrx_host_peer_ext_stats_clr(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
			     &peer->stats,  peer->peer_id,
			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}
7973 
/**
 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 * @soc: DP_SOC handle
 *
 * Clears pdev, soc, vdev, NAPI and all per-peer stats for the given vdev;
 * asks NSS FW to clear its counters first when NSS offload is enabled.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
	if (!vdev || !vdev->pdev)
		return QDF_STATUS_E_FAILURE;

	/*
	 * if NSS offload is enabled, then send message
	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
	 * then clear host statistics.
	 */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (soc->cdp_soc.ol_ops->nss_stats_clr)
			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
							   vdev->vdev_id);
	}

	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);
	DP_STATS_CLR(vdev);

	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

	/* Clear stats of every peer attached to this vdev */
	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
			     DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     &vdev->stats,  vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
	return QDF_STATUS_SUCCESS;
}
8014 
8015 /*
8016  * dp_get_host_peer_stats()- function to print peer stats
8017  * @soc: dp_soc handle
8018  * @mac_addr: mac address of the peer
8019  *
8020  * Return: QDF_STATUS
8021  */
8022 static QDF_STATUS
8023 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
8024 {
8025 	struct dp_peer *peer = NULL;
8026 
8027 	if (!mac_addr) {
8028 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8029 			  "%s: NULL peer mac addr\n", __func__);
8030 		return QDF_STATUS_E_FAILURE;
8031 	}
8032 
8033 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8034 				      mac_addr, 0,
8035 				      DP_VDEV_ALL,
8036 				      DP_MOD_ID_CDP);
8037 	if (!peer) {
8038 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8039 			  "%s: Invalid peer\n", __func__);
8040 		return QDF_STATUS_E_FAILURE;
8041 	}
8042 
8043 	dp_print_peer_stats(peer);
8044 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
8045 
8046 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8047 
8048 	return QDF_STATUS_SUCCESS;
8049 }
8050 
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Prints the usage of the "txrx_stats" iwpriv command and the list of
 * supported stats_option selectors.
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
	dp_info("stats_option:");
	dp_info("  1 -- HTT Tx Statistics");
	dp_info("  2 -- HTT Rx Statistics");
	dp_info("  3 -- HTT Tx HW Queue Statistics");
	dp_info("  4 -- HTT Tx HW Sched Statistics");
	dp_info("  5 -- HTT Error Statistics");
	dp_info("  6 -- HTT TQM Statistics");
	dp_info("  7 -- HTT TQM CMDQ Statistics");
	dp_info("  8 -- HTT TX_DE_CMN Statistics");
	dp_info("  9 -- HTT Tx Rate Statistics");
	dp_info(" 10 -- HTT Rx Rate Statistics");
	dp_info(" 11 -- HTT Peer Statistics");
	dp_info(" 12 -- HTT Tx SelfGen Statistics");
	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
	dp_info(" 15 -- HTT SRNG Statistics");
	dp_info(" 16 -- HTT SFM Info Statistics");
	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
	dp_info(" 18 -- HTT Peer List Details");
	dp_info(" 20 -- Clear Host Statistics");
	dp_info(" 21 -- Host Rx Rate Statistics");
	dp_info(" 22 -- Host Tx Rate Statistics");
	dp_info(" 23 -- Host Tx Statistics");
	dp_info(" 24 -- Host Rx Statistics");
	dp_info(" 25 -- Host AST Statistics");
	dp_info(" 26 -- Host SRNG PTR Statistics");
	dp_info(" 27 -- Host Mon Statistics");
	dp_info(" 28 -- Host REO Queue Statistics");
	dp_info(" 29 -- Host Soc cfg param Statistics");
	dp_info(" 30 -- Host pdev cfg param Statistics");
	dp_info(" 31 -- Host FISA stats");
	dp_info(" 32 -- Host Register Work stats");
}
8092 
8093 /**
8094  * dp_print_host_stats()- Function to print the stats aggregated at host
8095  * @vdev_handle: DP_VDEV handle
8096  * @req: host stats type
8097  * @soc: dp soc handler
8098  *
8099  * Return: 0 on success, print error message in case of failure
8100  */
8101 static int
8102 dp_print_host_stats(struct dp_vdev *vdev,
8103 		    struct cdp_txrx_stats_req *req,
8104 		    struct dp_soc *soc)
8105 {
8106 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8107 	enum cdp_host_txrx_stats type =
8108 			dp_stats_mapping_table[req->stats][STATS_HOST];
8109 
8110 	dp_aggregate_pdev_stats(pdev);
8111 
8112 	switch (type) {
8113 	case TXRX_CLEAR_STATS:
8114 		dp_txrx_host_stats_clr(vdev, soc);
8115 		break;
8116 	case TXRX_RX_RATE_STATS:
8117 		dp_print_rx_rates(vdev);
8118 		break;
8119 	case TXRX_TX_RATE_STATS:
8120 		dp_print_tx_rates(vdev);
8121 		break;
8122 	case TXRX_TX_HOST_STATS:
8123 		dp_print_pdev_tx_stats(pdev);
8124 		dp_print_soc_tx_stats(pdev->soc);
8125 		break;
8126 	case TXRX_RX_HOST_STATS:
8127 		dp_print_pdev_rx_stats(pdev);
8128 		dp_print_soc_rx_stats(pdev->soc);
8129 		break;
8130 	case TXRX_AST_STATS:
8131 		dp_print_ast_stats(pdev->soc);
8132 		dp_print_mec_stats(pdev->soc);
8133 		dp_print_peer_table(vdev);
8134 		break;
8135 	case TXRX_SRNG_PTR_STATS:
8136 		dp_print_ring_stats(pdev);
8137 		break;
8138 	case TXRX_RX_MON_STATS:
8139 		dp_monitor_print_pdev_rx_mon_stats(pdev);
8140 		break;
8141 	case TXRX_REO_QUEUE_STATS:
8142 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
8143 				       req->peer_addr);
8144 		break;
8145 	case TXRX_SOC_CFG_PARAMS:
8146 		dp_print_soc_cfg_params(pdev->soc);
8147 		break;
8148 	case TXRX_PDEV_CFG_PARAMS:
8149 		dp_print_pdev_cfg_params(pdev);
8150 		break;
8151 	case TXRX_NAPI_STATS:
8152 		dp_print_napi_stats(pdev->soc);
8153 		break;
8154 	case TXRX_SOC_INTERRUPT_STATS:
8155 		dp_print_soc_interrupt_stats(pdev->soc);
8156 		break;
8157 	case TXRX_SOC_FSE_STATS:
8158 		dp_rx_dump_fisa_table(pdev->soc);
8159 		break;
8160 	case TXRX_HAL_REG_WRITE_STATS:
8161 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
8162 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
8163 		break;
8164 	case TXRX_SOC_REO_HW_DESC_DUMP:
8165 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
8166 					 vdev->vdev_id);
8167 		break;
8168 	default:
8169 		dp_info("Wrong Input For TxRx Host Stats");
8170 		dp_txrx_stats_help();
8171 		break;
8172 	}
8173 	return 0;
8174 }
8175 
8176 /*
8177  * dp_pdev_tid_stats_ingress_inc
8178  * @pdev: pdev handle
8179  * @val: increase in value
8180  *
8181  * Return: void
8182  */
8183 static void
8184 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
8185 {
8186 	pdev->stats.tid_stats.ingress_stack += val;
8187 }
8188 
8189 /*
8190  * dp_pdev_tid_stats_osif_drop
8191  * @pdev: pdev handle
8192  * @val: increase in value
8193  *
8194  * Return: void
8195  */
8196 static void
8197 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
8198 {
8199 	pdev->stats.tid_stats.osif_drop += val;
8200 }
8201 
8202 /*
8203  * dp_get_fw_peer_stats()- function to print peer stats
8204  * @soc: soc handle
8205  * @pdev_id : id of the pdev handle
8206  * @mac_addr: mac address of the peer
8207  * @cap: Type of htt stats requested
8208  * @is_wait: if set, wait on completion from firmware response
8209  *
8210  * Currently Supporting only MAC ID based requests Only
8211  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
8212  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
8213  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
8214  *
8215  * Return: QDF_STATUS
8216  */
8217 static QDF_STATUS
8218 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8219 		     uint8_t *mac_addr,
8220 		     uint32_t cap, uint32_t is_wait)
8221 {
8222 	int i;
8223 	uint32_t config_param0 = 0;
8224 	uint32_t config_param1 = 0;
8225 	uint32_t config_param2 = 0;
8226 	uint32_t config_param3 = 0;
8227 	struct dp_pdev *pdev =
8228 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8229 						   pdev_id);
8230 
8231 	if (!pdev)
8232 		return QDF_STATUS_E_FAILURE;
8233 
8234 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
8235 	config_param0 |= (1 << (cap + 1));
8236 
8237 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
8238 		config_param1 |= (1 << i);
8239 	}
8240 
8241 	config_param2 |= (mac_addr[0] & 0x000000ff);
8242 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
8243 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
8244 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
8245 
8246 	config_param3 |= (mac_addr[4] & 0x000000ff);
8247 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
8248 
8249 	if (is_wait) {
8250 		qdf_event_reset(&pdev->fw_peer_stats_event);
8251 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8252 					  config_param0, config_param1,
8253 					  config_param2, config_param3,
8254 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
8255 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
8256 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
8257 	} else {
8258 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8259 					  config_param0, config_param1,
8260 					  config_param2, config_param3,
8261 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
8262 	}
8263 
8264 	return QDF_STATUS_SUCCESS;
8265 
8266 }
8267 
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
    uint32_t    config_param0;  /* opaque HTT config words, forwarded to FW */
    uint32_t    config_param1;
    uint32_t    config_param2;
    uint32_t    config_param3;
    int cookie;                 /* caller cookie passed through to the FW msg */
    u_int8_t    stats_id;       /* HTT stats type id for the request */
};
8278 
8279 /*
8280  * dp_get_htt_stats: function to process the httstas request
8281  * @soc: DP soc handle
8282  * @pdev_id: id of pdev handle
8283  * @data: pointer to request data
8284  * @data_len: length for request data
8285  *
8286  * return: QDF_STATUS
8287  */
8288 static QDF_STATUS
8289 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
8290 		 uint32_t data_len)
8291 {
8292 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8293 	struct dp_pdev *pdev =
8294 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8295 						   pdev_id);
8296 
8297 	if (!pdev)
8298 		return QDF_STATUS_E_FAILURE;
8299 
8300 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8301 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8302 				req->config_param0, req->config_param1,
8303 				req->config_param2, req->config_param3,
8304 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
8305 
8306 	return QDF_STATUS_SUCCESS;
8307 }
8308 
8309 /**
8310  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8311  * @pdev: DP_PDEV handle
8312  * @prio: tidmap priority value passed by the user
8313  *
8314  * Return: QDF_STATUS_SUCCESS on success
8315  */
8316 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
8317 						uint8_t prio)
8318 {
8319 	struct dp_soc *soc = pdev->soc;
8320 
8321 	soc->tidmap_prty = prio;
8322 
8323 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8324 	return QDF_STATUS_SUCCESS;
8325 }
8326 
8327 /*
8328  * dp_get_peer_param: function to get parameters in peer
8329  * @cdp_soc: DP soc handle
8330  * @vdev_id: id of vdev handle
8331  * @peer_mac: peer mac address
8332  * @param: parameter type to be set
8333  * @val : address of buffer
8334  *
8335  * Return: val
8336  */
8337 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8338 				    uint8_t *peer_mac,
8339 				    enum cdp_peer_param_type param,
8340 				    cdp_config_param_type *val)
8341 {
8342 	return QDF_STATUS_SUCCESS;
8343 }
8344 
8345 /*
8346  * dp_set_peer_param: function to set parameters in peer
8347  * @cdp_soc: DP soc handle
8348  * @vdev_id: id of vdev handle
8349  * @peer_mac: peer mac address
8350  * @param: parameter type to be set
8351  * @val: value of parameter to be set
8352  *
8353  * Return: 0 for success. nonzero for failure.
8354  */
8355 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8356 				    uint8_t *peer_mac,
8357 				    enum cdp_peer_param_type param,
8358 				    cdp_config_param_type val)
8359 {
8360 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
8361 						      peer_mac, 0, vdev_id,
8362 						      DP_MOD_ID_CDP);
8363 
8364 	if (!peer)
8365 		return QDF_STATUS_E_FAILURE;
8366 
8367 	switch (param) {
8368 	case CDP_CONFIG_NAWDS:
8369 		peer->nawds_enabled = val.cdp_peer_param_nawds;
8370 		break;
8371 	case CDP_CONFIG_NAC:
8372 		peer->nac = !!(val.cdp_peer_param_nac);
8373 		break;
8374 	case CDP_CONFIG_ISOLATION:
8375 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
8376 		break;
8377 	case CDP_CONFIG_IN_TWT:
8378 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
8379 		break;
8380 	default:
8381 		break;
8382 	}
8383 
8384 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8385 
8386 	return QDF_STATUS_SUCCESS;
8387 }
8388 
8389 /*
8390  * dp_get_pdev_param: function to get parameters from pdev
8391  * @cdp_soc: DP soc handle
8392  * @pdev_id: id of pdev handle
8393  * @param: parameter type to be get
8394  * @value : buffer for value
8395  *
8396  * Return: status
8397  */
8398 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8399 				    enum cdp_pdev_param_type param,
8400 				    cdp_config_param_type *val)
8401 {
8402 	struct cdp_pdev *pdev = (struct cdp_pdev *)
8403 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8404 						   pdev_id);
8405 	if (!pdev)
8406 		return QDF_STATUS_E_FAILURE;
8407 
8408 	switch (param) {
8409 	case CDP_CONFIG_VOW:
8410 		val->cdp_pdev_param_cfg_vow =
8411 				((struct dp_pdev *)pdev)->delay_stats_flag;
8412 		break;
8413 	case CDP_TX_PENDING:
8414 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
8415 		break;
8416 	case CDP_FILTER_MCAST_DATA:
8417 		val->cdp_pdev_param_fltr_mcast =
8418 				dp_monitor_pdev_get_filter_mcast_data(pdev);
8419 		break;
8420 	case CDP_FILTER_NO_DATA:
8421 		val->cdp_pdev_param_fltr_none =
8422 				dp_monitor_pdev_get_filter_non_data(pdev);
8423 		break;
8424 	case CDP_FILTER_UCAST_DATA:
8425 		val->cdp_pdev_param_fltr_ucast =
8426 				dp_monitor_pdev_get_filter_ucast_data(pdev);
8427 		break;
8428 	default:
8429 		return QDF_STATUS_E_FAILURE;
8430 	}
8431 
8432 	return QDF_STATUS_SUCCESS;
8433 }
8434 
8435 /*
8436  * dp_set_pdev_param: function to set parameters in pdev
8437  * @cdp_soc: DP soc handle
8438  * @pdev_id: id of pdev handle
8439  * @param: parameter type to be set
8440  * @val: value of parameter to be set
8441  *
8442  * Return: 0 for success. nonzero for failure.
8443  */
8444 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8445 				    enum cdp_pdev_param_type param,
8446 				    cdp_config_param_type val)
8447 {
8448 	int target_type;
8449 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8450 	struct dp_pdev *pdev =
8451 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8452 						   pdev_id);
8453 	enum reg_wifi_band chan_band;
8454 
8455 	if (!pdev)
8456 		return QDF_STATUS_E_FAILURE;
8457 
8458 	target_type = hal_get_target_type(soc->hal_soc);
8459 	switch (target_type) {
8460 	case TARGET_TYPE_QCA6750:
8461 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
8462 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8463 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8464 		break;
8465 	default:
8466 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
8467 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8468 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8469 		break;
8470 	}
8471 
8472 	switch (param) {
8473 	case CDP_CONFIG_TX_CAPTURE:
8474 		return dp_monitor_config_debug_sniffer(pdev,
8475 						val.cdp_pdev_param_tx_capture);
8476 	case CDP_CONFIG_DEBUG_SNIFFER:
8477 		return dp_monitor_config_debug_sniffer(pdev,
8478 						val.cdp_pdev_param_dbg_snf);
8479 	case CDP_CONFIG_BPR_ENABLE:
8480 		return dp_monitor_set_bpr_enable(pdev,
8481 						 val.cdp_pdev_param_bpr_enable);
8482 	case CDP_CONFIG_PRIMARY_RADIO:
8483 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
8484 		break;
8485 	case CDP_CONFIG_CAPTURE_LATENCY:
8486 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
8487 		break;
8488 	case CDP_INGRESS_STATS:
8489 		dp_pdev_tid_stats_ingress_inc(pdev,
8490 					      val.cdp_pdev_param_ingrs_stats);
8491 		break;
8492 	case CDP_OSIF_DROP:
8493 		dp_pdev_tid_stats_osif_drop(pdev,
8494 					    val.cdp_pdev_param_osif_drop);
8495 		break;
8496 	case CDP_CONFIG_ENH_RX_CAPTURE:
8497 		return dp_monitor_config_enh_rx_capture(pdev,
8498 						val.cdp_pdev_param_en_rx_cap);
8499 	case CDP_CONFIG_ENH_TX_CAPTURE:
8500 		return dp_monitor_config_enh_tx_capture(pdev,
8501 						val.cdp_pdev_param_en_tx_cap);
8502 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
8503 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
8504 		break;
8505 	case CDP_CONFIG_HMMC_TID_VALUE:
8506 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
8507 		break;
8508 	case CDP_CHAN_NOISE_FLOOR:
8509 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
8510 		break;
8511 	case CDP_TIDMAP_PRTY:
8512 		dp_set_pdev_tidmap_prty_wifi3(pdev,
8513 					      val.cdp_pdev_param_tidmap_prty);
8514 		break;
8515 	case CDP_FILTER_NEIGH_PEERS:
8516 		dp_monitor_set_filter_neigh_peers(pdev,
8517 					val.cdp_pdev_param_fltr_neigh_peers);
8518 		break;
8519 	case CDP_MONITOR_CHANNEL:
8520 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
8521 		break;
8522 	case CDP_MONITOR_FREQUENCY:
8523 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
8524 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
8525 		dp_monitor_set_chan_band(pdev, chan_band);
8526 		break;
8527 	case CDP_CONFIG_BSS_COLOR:
8528 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
8529 		break;
8530 	case CDP_SET_ATF_STATS_ENABLE:
8531 		dp_monitor_set_atf_stats_enable(pdev,
8532 					val.cdp_pdev_param_atf_stats_enable);
8533 		break;
8534 	case CDP_CONFIG_SPECIAL_VAP:
8535 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
8536 					val.cdp_pdev_param_config_special_vap);
8537 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
8538 		break;
8539 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
8540 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
8541 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
8542 		break;
8543 	default:
8544 		return QDF_STATUS_E_INVAL;
8545 	}
8546 	return QDF_STATUS_SUCCESS;
8547 }
8548 
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_update_peer_delay_stats() - accumulate per-peer rx delay stats
 * @soc: DP soc handle
 * @nbuf: received buffer; peer id, tid and ring ctx are read from its
 *	  control-block area
 *
 * Looks up the peer from the nbuf's cached peer id and, when the peer
 * has an extended-stats block allocated, folds this frame's delay into
 * the per-tid/per-ring delay bucket.
 *
 * Return: void
 */
static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
					  qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint16_t peer_id, ring_id;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct cdp_peer_ext_stats *pext_stats = NULL;

	/* NOTE(review): '>' lets peer_id == soc->max_peers through; this
	 * relies on dp_peer_get_ref_by_id doing its own bounds check —
	 * confirm whether '>=' was intended.
	 */
	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peers)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (qdf_unlikely(!peer))
		return;

	/* Only peers with an allocated extended-stats block are tracked */
	if (qdf_likely(peer->pext_stats)) {
		pext_stats = peer->pext_stats;
		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
					nbuf);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#else
/* Stub when peer extended stats support is compiled out */
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
}
#endif
8580 
8581 /*
8582  * dp_calculate_delay_stats: function to get rx delay stats
8583  * @cdp_soc: DP soc handle
8584  * @vdev_id: id of DP vdev handle
8585  * @nbuf: skb
8586  *
8587  * Return: QDF_STATUS
8588  */
8589 static QDF_STATUS
8590 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8591 			 qdf_nbuf_t nbuf)
8592 {
8593 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8594 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8595 						     DP_MOD_ID_CDP);
8596 
8597 	if (!vdev)
8598 		return QDF_STATUS_SUCCESS;
8599 
8600 	if (vdev->pdev->delay_stats_flag)
8601 		dp_rx_compute_delay(vdev, nbuf);
8602 	else
8603 		dp_rx_update_peer_delay_stats(soc, nbuf);
8604 
8605 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8606 	return QDF_STATUS_SUCCESS;
8607 }
8608 
8609 /*
8610  * dp_get_vdev_param: function to get parameters from vdev
8611  * @cdp_soc : DP soc handle
8612  * @vdev_id: id of DP vdev handle
8613  * @param: parameter type to get value
8614  * @val: buffer address
8615  *
8616  * return: status
8617  */
8618 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8619 				    enum cdp_vdev_param_type param,
8620 				    cdp_config_param_type *val)
8621 {
8622 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8623 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8624 						     DP_MOD_ID_CDP);
8625 
8626 	if (!vdev)
8627 		return QDF_STATUS_E_FAILURE;
8628 
8629 	switch (param) {
8630 	case CDP_ENABLE_WDS:
8631 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8632 		break;
8633 	case CDP_ENABLE_MEC:
8634 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8635 		break;
8636 	case CDP_ENABLE_DA_WAR:
8637 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8638 		break;
8639 	case CDP_ENABLE_IGMP_MCAST_EN:
8640 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
8641 		break;
8642 	case CDP_ENABLE_MCAST_EN:
8643 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
8644 		break;
8645 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
8646 		val->cdp_vdev_param_hlos_tid_override =
8647 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
8648 		break;
8649 	case CDP_ENABLE_PEER_AUTHORIZE:
8650 		val->cdp_vdev_param_peer_authorize =
8651 			    vdev->peer_authorize;
8652 		break;
8653 #ifdef WLAN_SUPPORT_MESH_LATENCY
8654 	case CDP_ENABLE_PEER_TID_LATENCY:
8655 		val->cdp_vdev_param_peer_tid_latency_enable =
8656 			vdev->peer_tid_latency_enabled;
8657 		break;
8658 	case CDP_SET_VAP_MESH_TID:
8659 		val->cdp_vdev_param_mesh_tid =
8660 				vdev->mesh_tid_latency_config.latency_tid;
8661 		break;
8662 #endif
8663 	default:
8664 		dp_cdp_err("%pK: param value %d is wrong",
8665 			   soc, param);
8666 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8667 		return QDF_STATUS_E_FAILURE;
8668 	}
8669 
8670 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8671 	return QDF_STATUS_SUCCESS;
8672 }
8673 
8674 /*
8675  * dp_set_vdev_param: function to set parameters in vdev
8676  * @cdp_soc : DP soc handle
8677  * @vdev_id: id of DP vdev handle
8678  * @param: parameter type to get value
8679  * @val: value
8680  *
8681  * return: QDF_STATUS
8682  */
8683 static QDF_STATUS
8684 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8685 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
8686 {
8687 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
8688 	struct dp_vdev *vdev =
8689 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
8690 	uint32_t var = 0;
8691 
8692 	if (!vdev)
8693 		return QDF_STATUS_E_FAILURE;
8694 
8695 	switch (param) {
8696 	case CDP_ENABLE_WDS:
8697 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
8698 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
8699 		vdev->wds_enabled = val.cdp_vdev_param_wds;
8700 		break;
8701 	case CDP_ENABLE_MEC:
8702 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
8703 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
8704 		vdev->mec_enabled = val.cdp_vdev_param_mec;
8705 		break;
8706 	case CDP_ENABLE_DA_WAR:
8707 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
8708 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
8709 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
8710 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8711 					     vdev->pdev->soc));
8712 		break;
8713 	case CDP_ENABLE_NAWDS:
8714 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
8715 		break;
8716 	case CDP_ENABLE_MCAST_EN:
8717 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
8718 		break;
8719 	case CDP_ENABLE_IGMP_MCAST_EN:
8720 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
8721 		break;
8722 	case CDP_ENABLE_PROXYSTA:
8723 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
8724 		break;
8725 	case CDP_UPDATE_TDLS_FLAGS:
8726 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
8727 		break;
8728 	case CDP_CFG_WDS_AGING_TIMER:
8729 		var = val.cdp_vdev_param_aging_tmr;
8730 		if (!var)
8731 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8732 		else if (var != vdev->wds_aging_timer_val)
8733 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
8734 
8735 		vdev->wds_aging_timer_val = var;
8736 		break;
8737 	case CDP_ENABLE_AP_BRIDGE:
8738 		if (wlan_op_mode_sta != vdev->opmode)
8739 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
8740 		else
8741 			vdev->ap_bridge_enabled = false;
8742 		break;
8743 	case CDP_ENABLE_CIPHER:
8744 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
8745 		break;
8746 	case CDP_ENABLE_QWRAP_ISOLATION:
8747 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
8748 		break;
8749 	case CDP_UPDATE_MULTIPASS:
8750 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
8751 		break;
8752 	case CDP_TX_ENCAP_TYPE:
8753 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
8754 		break;
8755 	case CDP_RX_DECAP_TYPE:
8756 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
8757 		break;
8758 	case CDP_TID_VDEV_PRTY:
8759 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
8760 		break;
8761 	case CDP_TIDMAP_TBL_ID:
8762 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
8763 		break;
8764 #ifdef MESH_MODE_SUPPORT
8765 	case CDP_MESH_RX_FILTER:
8766 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
8767 					   val.cdp_vdev_param_mesh_rx_filter);
8768 		break;
8769 	case CDP_MESH_MODE:
8770 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
8771 				      val.cdp_vdev_param_mesh_mode);
8772 		break;
8773 #endif
8774 	case CDP_ENABLE_CSUM:
8775 		dp_info("vdev_id %d enable Checksum %d", vdev_id,
8776 			val.cdp_enable_tx_checksum);
8777 		vdev->csum_enabled = val.cdp_enable_tx_checksum;
8778 		break;
8779 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
8780 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
8781 			val.cdp_vdev_param_hlos_tid_override);
8782 		dp_vdev_set_hlos_tid_override(vdev,
8783 				val.cdp_vdev_param_hlos_tid_override);
8784 		break;
8785 #ifdef QCA_SUPPORT_WDS_EXTENDED
8786 	case CDP_CFG_WDS_EXT:
8787 		vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
8788 		break;
8789 #endif
8790 	case CDP_ENABLE_PEER_AUTHORIZE:
8791 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
8792 		break;
8793 #ifdef WLAN_SUPPORT_MESH_LATENCY
8794 	case CDP_ENABLE_PEER_TID_LATENCY:
8795 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
8796 			val.cdp_vdev_param_peer_tid_latency_enable);
8797 		vdev->peer_tid_latency_enabled =
8798 			val.cdp_vdev_param_peer_tid_latency_enable;
8799 		break;
8800 	case CDP_SET_VAP_MESH_TID:
8801 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
8802 			val.cdp_vdev_param_mesh_tid);
8803 		vdev->mesh_tid_latency_config.latency_tid
8804 				= val.cdp_vdev_param_mesh_tid;
8805 		break;
8806 #endif
8807 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
8808 	case CDP_SKIP_BAR_UPDATE_AP:
8809 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
8810 			val.cdp_skip_bar_update);
8811 		vdev->skip_bar_update = val.cdp_skip_bar_update;
8812 		vdev->skip_bar_update_last_ts = 0;
8813 		break;
8814 #endif
8815 	default:
8816 		break;
8817 	}
8818 
8819 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
8820 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
8821 
8822 	return QDF_STATUS_SUCCESS;
8823 }
8824 
8825 /*
8826  * dp_set_psoc_param: function to set parameters in psoc
8827  * @cdp_soc : DP soc handle
8828  * @param: parameter type to be set
8829  * @val: value of parameter to be set
8830  *
8831  * return: QDF_STATUS
8832  */
8833 static QDF_STATUS
8834 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
8835 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
8836 {
8837 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8838 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
8839 
8840 	switch (param) {
8841 	case CDP_ENABLE_RATE_STATS:
8842 		soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats;
8843 		break;
8844 	case CDP_SET_NSS_CFG:
8845 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8846 					    val.cdp_psoc_param_en_nss_cfg);
8847 		/*
8848 		 * TODO: masked out based on the per offloaded radio
8849 		 */
8850 		switch (val.cdp_psoc_param_en_nss_cfg) {
8851 		case dp_nss_cfg_default:
8852 			break;
8853 		case dp_nss_cfg_first_radio:
8854 		/*
8855 		 * This configuration is valid for single band radio which
8856 		 * is also NSS offload.
8857 		 */
8858 		case dp_nss_cfg_dbdc:
8859 		case dp_nss_cfg_dbtc:
8860 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8861 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8862 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8863 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8864 			break;
8865 		default:
8866 			dp_cdp_err("%pK: Invalid offload config %d",
8867 				   soc, val.cdp_psoc_param_en_nss_cfg);
8868 		}
8869 
8870 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
8871 				   , soc);
8872 		break;
8873 	case CDP_SET_PREFERRED_HW_MODE:
8874 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
8875 		break;
8876 	default:
8877 		break;
8878 	}
8879 
8880 	return QDF_STATUS_SUCCESS;
8881 }
8882 
8883 /*
8884  * dp_get_psoc_param: function to get parameters in soc
8885  * @cdp_soc : DP soc handle
8886  * @param: parameter type to be set
8887  * @val: address of buffer
8888  *
8889  * return: status
8890  */
8891 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8892 				    enum cdp_psoc_param_type param,
8893 				    cdp_config_param_type *val)
8894 {
8895 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8896 
8897 	if (!soc)
8898 		return QDF_STATUS_E_FAILURE;
8899 
8900 	switch (param) {
8901 	case CDP_CFG_PEER_EXT_STATS:
8902 		val->cdp_psoc_param_pext_stats =
8903 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
8904 		break;
8905 	default:
8906 		dp_warn("Invalid param");
8907 		break;
8908 	}
8909 
8910 	return QDF_STATUS_SUCCESS;
8911 }
8912 
8913 /*
8914  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8915  * @soc: DP_SOC handle
8916  * @vdev_id: id of DP_VDEV handle
8917  * @map_id:ID of map that needs to be updated
8918  *
8919  * Return: QDF_STATUS
8920  */
8921 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
8922 						 uint8_t vdev_id,
8923 						 uint8_t map_id)
8924 {
8925 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8926 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8927 						     DP_MOD_ID_CDP);
8928 	if (vdev) {
8929 		vdev->dscp_tid_map_id = map_id;
8930 		/* Updatr flag for transmit tid classification */
8931 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
8932 			vdev->skip_sw_tid_classification |=
8933 				DP_TX_HW_DSCP_TID_MAP_VALID;
8934 		else
8935 			vdev->skip_sw_tid_classification &=
8936 				~DP_TX_HW_DSCP_TID_MAP_VALID;
8937 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8938 		return QDF_STATUS_SUCCESS;
8939 	}
8940 
8941 	return QDF_STATUS_E_FAILURE;
8942 }
8943 
#ifdef DP_RATETABLE_SUPPORT
/*
 * dp_txrx_get_ratekbps() - look up a rate (kbps) from the rate table
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flag — NOTE(review): not forwarded; dp_getrateindex is
 *	    called with hard-coded 1s for its third and fifth arguments
 *	    instead. Confirm this is intentional.
 * @gintval: guard interval
 *
 * Return: value from dp_getrateindex() (0 in the stub build)
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;

	/* rix/ratecode outputs are discarded; only the return is used */
	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, &rix, &ratecode);
}
#else
/* Stub when rate table support is compiled out */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
8961 
8962 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8963  * @soc: DP soc handle
8964  * @pdev_id: id of DP pdev handle
8965  * @pdev_stats: buffer to copy to
8966  *
8967  * return : status success/failure
8968  */
8969 static QDF_STATUS
8970 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8971 		       struct cdp_pdev_stats *pdev_stats)
8972 {
8973 	struct dp_pdev *pdev =
8974 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8975 						   pdev_id);
8976 	if (!pdev)
8977 		return QDF_STATUS_E_FAILURE;
8978 
8979 	dp_aggregate_pdev_stats(pdev);
8980 
8981 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8982 	return QDF_STATUS_SUCCESS;
8983 }
8984 
/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 *	 (interpreted as struct cdp_tx_ingress_stats)
 *
 * Folds the caller-supplied multicast-enhancement counters into the
 * vdev's tx ingress stats; each field is added, not overwritten.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* Packet counter carries both packet and byte counts */
	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}
9018 
/* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure
 *	 (interpreted as struct cdp_tx_ingress_stats)
 *
 * Folds the caller-supplied IGMP multicast-enhancement counters into
 * the vdev's tx ingress stats; fields are added, not overwritten.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
					      void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
		     host_stats->igmp_mcast_en.igmp_rcvd);
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
		     host_stats->igmp_mcast_en.igmp_ucast_converted);
}
9041 
9042 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
9043  * @soc: DP soc handle
9044  * @vdev_id: id of DP vdev handle
9045  * @buf: buffer containing specific stats structure
9046  * @stats_id: stats type
9047  *
9048  * Returns: QDF_STATUS
9049  */
9050 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
9051 						 uint8_t vdev_id,
9052 						 void *buf,
9053 						 uint16_t stats_id)
9054 {
9055 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9056 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9057 						     DP_MOD_ID_CDP);
9058 
9059 	if (!vdev) {
9060 		dp_cdp_err("%pK: Invalid vdev handle", soc);
9061 		return QDF_STATUS_E_FAILURE;
9062 	}
9063 
9064 	switch (stats_id) {
9065 	case DP_VDEV_STATS_PKT_CNT_ONLY:
9066 		break;
9067 	case DP_VDEV_STATS_TX_ME:
9068 		dp_txrx_update_vdev_me_stats(vdev, buf);
9069 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
9070 		break;
9071 	default:
9072 		qdf_info("Invalid stats_id %d", stats_id);
9073 		break;
9074 	}
9075 
9076 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9077 	return QDF_STATUS_SUCCESS;
9078 }
9079 
9080 /* dp_txrx_get_soc_stats - will return cdp_soc_stats
9081  * @soc_hdl: soc handle
9082  * @soc_stats: buffer to hold the values
9083  *
9084  * return: status success/failure
9085  */
9086 static QDF_STATUS
9087 dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl,
9088 		      struct cdp_soc_stats *soc_stats)
9089 {
9090 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9091 
9092 	soc_stats->tx.egress = soc->stats.tx.egress;
9093 	soc_stats->rx.ingress = soc->stats.rx.ingress;
9094 	soc_stats->rx.err_ring_pkts = soc->stats.rx.err_ring_pkts;
9095 	soc_stats->rx.rx_frags = soc->stats.rx.rx_frags;
9096 	soc_stats->rx.reo_reinject = soc->stats.rx.reo_reinject;
9097 	soc_stats->rx.bar_frame = soc->stats.rx.bar_frame;
9098 	soc_stats->rx.err.rx_rejected = soc->stats.rx.err.rejected;
9099 	soc_stats->rx.err.rx_raw_frm_drop = soc->stats.rx.err.raw_frm_drop;
9100 
9101 	return QDF_STATUS_SUCCESS;
9102 }
9103 
9104 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
9105  * @soc: soc handle
9106  * @vdev_id: id of vdev handle
9107  * @peer_mac: mac of DP_PEER handle
9108  * @peer_stats: buffer to copy to
9109  * return : status success/failure
9110  */
9111 static QDF_STATUS
9112 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
9113 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
9114 {
9115 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9116 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9117 						       peer_mac, 0, vdev_id,
9118 						       DP_MOD_ID_CDP);
9119 
9120 	if (!peer)
9121 		return QDF_STATUS_E_FAILURE;
9122 
9123 	qdf_mem_copy(peer_stats, &peer->stats,
9124 		     sizeof(struct cdp_peer_stats));
9125 
9126 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9127 
9128 	return status;
9129 }
9130 
9131 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
9132  * @param soc - soc handle
9133  * @param vdev_id - vdev_id of vdev object
9134  * @param peer_mac - mac address of the peer
9135  * @param type - enum of required stats
9136  * @param buf - buffer to hold the value
9137  * return : status success/failure
9138  */
9139 static QDF_STATUS
9140 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
9141 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
9142 			     cdp_peer_stats_param_t *buf)
9143 {
9144 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
9145 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9146 						      peer_mac, 0, vdev_id,
9147 						      DP_MOD_ID_CDP);
9148 
9149 	if (!peer) {
9150 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
9151 			    soc, QDF_MAC_ADDR_REF(peer_mac));
9152 		return QDF_STATUS_E_FAILURE;
9153 	} else if (type < cdp_peer_stats_max) {
9154 		switch (type) {
9155 		case cdp_peer_tx_ucast:
9156 			buf->tx_ucast = peer->stats.tx.ucast;
9157 			break;
9158 		case cdp_peer_tx_mcast:
9159 			buf->tx_mcast = peer->stats.tx.mcast;
9160 			break;
9161 		case cdp_peer_tx_rate:
9162 			buf->tx_rate = peer->stats.tx.tx_rate;
9163 			break;
9164 		case cdp_peer_tx_last_tx_rate:
9165 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
9166 			break;
9167 		case cdp_peer_tx_inactive_time:
9168 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
9169 			break;
9170 		case cdp_peer_tx_ratecode:
9171 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
9172 			break;
9173 		case cdp_peer_tx_flags:
9174 			buf->tx_flags = peer->stats.tx.tx_flags;
9175 			break;
9176 		case cdp_peer_tx_power:
9177 			buf->tx_power = peer->stats.tx.tx_power;
9178 			break;
9179 		case cdp_peer_rx_rate:
9180 			buf->rx_rate = peer->stats.rx.rx_rate;
9181 			break;
9182 		case cdp_peer_rx_last_rx_rate:
9183 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
9184 			break;
9185 		case cdp_peer_rx_ratecode:
9186 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
9187 			break;
9188 		case cdp_peer_rx_ucast:
9189 			buf->rx_ucast = peer->stats.rx.unicast;
9190 			break;
9191 		case cdp_peer_rx_flags:
9192 			buf->rx_flags = peer->stats.rx.rx_flags;
9193 			break;
9194 		case cdp_peer_rx_avg_snr:
9195 			buf->rx_avg_snr = peer->stats.rx.avg_snr;
9196 			break;
9197 		default:
9198 			dp_peer_err("%pK: Invalid value", soc);
9199 			ret = QDF_STATUS_E_FAILURE;
9200 			break;
9201 		}
9202 	} else {
9203 		dp_peer_err("%pK: Invalid value", soc);
9204 		ret = QDF_STATUS_E_FAILURE;
9205 	}
9206 
9207 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9208 
9209 	return ret;
9210 }
9211 
9212 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
9213  * @soc: soc handle
9214  * @vdev_id: id of vdev handle
9215  * @peer_mac: mac of DP_PEER handle
9216  *
9217  * return : QDF_STATUS
9218  */
9219 static QDF_STATUS
9220 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
9221 			 uint8_t *peer_mac)
9222 {
9223 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9224 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9225 						      peer_mac, 0, vdev_id,
9226 						      DP_MOD_ID_CDP);
9227 
9228 	if (!peer)
9229 		return QDF_STATUS_E_FAILURE;
9230 
9231 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
9232 
9233 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9234 
9235 	return status;
9236 }
9237 
9238 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
9239  * @vdev_handle: DP_VDEV handle
9240  * @buf: buffer for vdev stats
9241  *
9242  * return : int
9243  */
9244 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9245 				  void *buf, bool is_aggregate)
9246 {
9247 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9248 	struct cdp_vdev_stats *vdev_stats;
9249 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9250 						     DP_MOD_ID_CDP);
9251 
9252 	if (!vdev)
9253 		return 1;
9254 
9255 	vdev_stats = (struct cdp_vdev_stats *)buf;
9256 
9257 	if (is_aggregate) {
9258 		dp_aggregate_vdev_stats(vdev, buf);
9259 	} else {
9260 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9261 	}
9262 
9263 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9264 	return 0;
9265 }
9266 
9267 /*
9268  * dp_get_total_per(): get total per
9269  * @soc: DP soc handle
9270  * @pdev_id: id of DP_PDEV handle
9271  *
9272  * Return: % error rate using retries per packet and success packets
9273  */
9274 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
9275 {
9276 	struct dp_pdev *pdev =
9277 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9278 						   pdev_id);
9279 
9280 	if (!pdev)
9281 		return 0;
9282 
9283 	dp_aggregate_pdev_stats(pdev);
9284 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
9285 		return 0;
9286 	return ((pdev->stats.tx.retries * 100) /
9287 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
9288 }
9289 
9290 /*
9291  * dp_txrx_stats_publish(): publish pdev stats into a buffer
9292  * @soc: DP soc handle
9293  * @pdev_id: id of DP_PDEV handle
9294  * @buf: to hold pdev_stats
9295  *
9296  * Return: int
9297  */
9298 static int
9299 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
9300 		      struct cdp_stats_extd *buf)
9301 {
9302 	struct cdp_txrx_stats_req req = {0,};
9303 	struct dp_pdev *pdev =
9304 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9305 						   pdev_id);
9306 
9307 	if (!pdev)
9308 		return TXRX_STATS_LEVEL_OFF;
9309 
9310 	dp_aggregate_pdev_stats(pdev);
9311 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
9312 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
9313 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9314 				req.param1, req.param2, req.param3, 0,
9315 				req.cookie_val, 0);
9316 
9317 	msleep(DP_MAX_SLEEP_TIME);
9318 
9319 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
9320 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
9321 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9322 				req.param1, req.param2, req.param3, 0,
9323 				req.cookie_val, 0);
9324 
9325 	msleep(DP_MAX_SLEEP_TIME);
9326 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
9327 
9328 	return TXRX_STATS_LEVEL;
9329 }
9330 
9331 /**
9332  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
9333  * @soc: soc handle
9334  * @pdev_id: id of DP_PDEV handle
9335  * @map_id: ID of map that needs to be updated
9336  * @tos: index value in map
9337  * @tid: tid value passed by the user
9338  *
9339  * Return: QDF_STATUS
9340  */
9341 static QDF_STATUS
9342 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
9343 			       uint8_t pdev_id,
9344 			       uint8_t map_id,
9345 			       uint8_t tos, uint8_t tid)
9346 {
9347 	uint8_t dscp;
9348 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9349 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9350 
9351 	if (!pdev)
9352 		return QDF_STATUS_E_FAILURE;
9353 
9354 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
9355 	pdev->dscp_tid_map[map_id][dscp] = tid;
9356 
9357 	if (map_id < soc->num_hw_dscp_tid_map)
9358 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
9359 				       map_id, dscp);
9360 	else
9361 		return QDF_STATUS_E_FAILURE;
9362 
9363 	return QDF_STATUS_SUCCESS;
9364 }
9365 
9366 /**
9367  * dp_fw_stats_process(): Process TxRX FW stats request
9368  * @vdev_handle: DP VDEV handle
9369  * @req: stats request
9370  *
9371  * return: int
9372  */
9373 static int dp_fw_stats_process(struct dp_vdev *vdev,
9374 			       struct cdp_txrx_stats_req *req)
9375 {
9376 	struct dp_pdev *pdev = NULL;
9377 	uint32_t stats = req->stats;
9378 	uint8_t mac_id = req->mac_id;
9379 
9380 	if (!vdev) {
9381 		DP_TRACE(NONE, "VDEV not found");
9382 		return 1;
9383 	}
9384 	pdev = vdev->pdev;
9385 
9386 	/*
9387 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
9388 	 * from param0 to param3 according to below rule:
9389 	 *
9390 	 * PARAM:
9391 	 *   - config_param0 : start_offset (stats type)
9392 	 *   - config_param1 : stats bmask from start offset
9393 	 *   - config_param2 : stats bmask from start offset + 32
9394 	 *   - config_param3 : stats bmask from start offset + 64
9395 	 */
9396 	if (req->stats == CDP_TXRX_STATS_0) {
9397 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
9398 		req->param1 = 0xFFFFFFFF;
9399 		req->param2 = 0xFFFFFFFF;
9400 		req->param3 = 0xFFFFFFFF;
9401 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
9402 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
9403 	}
9404 
9405 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
9406 		return dp_h2t_ext_stats_msg_send(pdev,
9407 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
9408 				req->param0, req->param1, req->param2,
9409 				req->param3, 0, DBG_STATS_COOKIE_DEFAULT,
9410 				mac_id);
9411 	} else {
9412 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
9413 				req->param1, req->param2, req->param3,
9414 				0, DBG_STATS_COOKIE_DEFAULT, mac_id);
9415 	}
9416 }
9417 
9418 /**
9419  * dp_txrx_stats_request - function to map to firmware and host stats
9420  * @soc: soc handle
9421  * @vdev_id: virtual device ID
9422  * @req: stats request
9423  *
9424  * Return: QDF_STATUS
9425  */
9426 static
9427 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
9428 				 uint8_t vdev_id,
9429 				 struct cdp_txrx_stats_req *req)
9430 {
9431 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
9432 	int host_stats;
9433 	int fw_stats;
9434 	enum cdp_stats stats;
9435 	int num_stats;
9436 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9437 						     DP_MOD_ID_CDP);
9438 	QDF_STATUS status = QDF_STATUS_E_INVAL;
9439 
9440 	if (!vdev || !req) {
9441 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
9442 		status = QDF_STATUS_E_INVAL;
9443 		goto fail0;
9444 	}
9445 
9446 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
9447 		dp_err("Invalid mac id request");
9448 		status = QDF_STATUS_E_INVAL;
9449 		goto fail0;
9450 	}
9451 
9452 	stats = req->stats;
9453 	if (stats >= CDP_TXRX_MAX_STATS) {
9454 		status = QDF_STATUS_E_INVAL;
9455 		goto fail0;
9456 	}
9457 
9458 	/*
9459 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
9460 	 *			has to be updated if new FW HTT stats added
9461 	 */
9462 	if (stats > CDP_TXRX_STATS_HTT_MAX)
9463 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
9464 
9465 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
9466 
9467 	if (stats >= num_stats) {
9468 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
9469 		status = QDF_STATUS_E_INVAL;
9470 		goto fail0;
9471 	}
9472 
9473 	req->stats = stats;
9474 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
9475 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
9476 
9477 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
9478 		stats, fw_stats, host_stats);
9479 
9480 	if (fw_stats != TXRX_FW_STATS_INVALID) {
9481 		/* update request with FW stats type */
9482 		req->stats = fw_stats;
9483 		status = dp_fw_stats_process(vdev, req);
9484 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
9485 			(host_stats <= TXRX_HOST_STATS_MAX))
9486 		status = dp_print_host_stats(vdev, req, soc);
9487 	else
9488 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
9489 fail0:
9490 	if (vdev)
9491 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9492 	return status;
9493 }
9494 
9495 /*
9496  * dp_txrx_dump_stats() -  Dump statistics
9497  * @value - Statistics option
9498  */
9499 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
9500 				     enum qdf_stats_verbosity_level level)
9501 {
9502 	struct dp_soc *soc =
9503 		(struct dp_soc *)psoc;
9504 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9505 
9506 	if (!soc) {
9507 		dp_cdp_err("%pK: soc is NULL", soc);
9508 		return QDF_STATUS_E_INVAL;
9509 	}
9510 
9511 	switch (value) {
9512 	case CDP_TXRX_PATH_STATS:
9513 		dp_txrx_path_stats(soc);
9514 		dp_print_soc_interrupt_stats(soc);
9515 		hal_dump_reg_write_stats(soc->hal_soc);
9516 		break;
9517 
9518 	case CDP_RX_RING_STATS:
9519 		dp_print_per_ring_stats(soc);
9520 		break;
9521 
9522 	case CDP_TXRX_TSO_STATS:
9523 		dp_print_tso_stats(soc, level);
9524 		break;
9525 
9526 	case CDP_DUMP_TX_FLOW_POOL_INFO:
9527 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
9528 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
9529 		break;
9530 
9531 	case CDP_DP_NAPI_STATS:
9532 		dp_print_napi_stats(soc);
9533 		break;
9534 
9535 	case CDP_TXRX_DESC_STATS:
9536 		/* TODO: NOT IMPLEMENTED */
9537 		break;
9538 
9539 	case CDP_DP_RX_FISA_STATS:
9540 		dp_rx_dump_fisa_stats(soc);
9541 		break;
9542 
9543 	case CDP_DP_SWLM_STATS:
9544 		dp_print_swlm_stats(soc);
9545 		break;
9546 
9547 	default:
9548 		status = QDF_STATUS_E_INVAL;
9549 		break;
9550 	}
9551 
9552 	return status;
9553 
9554 }
9555 
9556 /**
9557  * dp_txrx_clear_dump_stats() - clear dumpStats
9558  * @soc- soc handle
9559  * @value - stats option
9560  *
9561  * Return: 0 - Success, non-zero - failure
9562  */
9563 static
9564 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9565 				    uint8_t value)
9566 {
9567 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9568 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9569 
9570 	if (!soc) {
9571 		dp_err("soc is NULL");
9572 		return QDF_STATUS_E_INVAL;
9573 	}
9574 
9575 	switch (value) {
9576 	case CDP_TXRX_TSO_STATS:
9577 		dp_txrx_clear_tso_stats(soc);
9578 		break;
9579 
9580 	default:
9581 		status = QDF_STATUS_E_INVAL;
9582 		break;
9583 	}
9584 
9585 	return status;
9586 }
9587 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	/* queue depth at which TX flow control pauses the queue */
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	/* offset below the stop threshold at which the queue restarts */
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
/* TX flow control v2 compiled out: nothing to store */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
9613 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024

/* Max packet limit for RX REAP Loop (dp_rx_process) */
#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024

/**
 * dp_update_rx_soft_irq_limit_params() - store soft-irq batching limits
 * @soc: DP soc handle
 * @params: cdp config parameters carrying the limits
 *
 * Copies the TX-completion and RX-reap loop packet limits into wlan_cfg,
 * enabling the end-of-loop data check only when the limit is below the
 * corresponding maximum.
 *
 * Return: void
 */
static
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
				params->tx_comp_loop_pkt_limit;

	/* only re-check for more data when the loop can exit early */
	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
				params->rx_reap_loop_pkt_limit;

	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
	else
		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;

	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
				params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
}

/**
 * dp_update_soft_irq_limits() - override the TX/RX loop packet limits
 * @soc: DP soc handle
 * @tx_limit: TX-completion loop packet limit
 * @rx_limit: RX-reap loop packet limit
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}

#else
/* RX softirq time limit feature compiled out: stubs */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }

static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9671 
9672 /**
9673  * dp_update_config_parameters() - API to store datapath
9674  *                            config parameters
9675  * @soc: soc handle
9676  * @cfg: ini parameter handle
9677  *
9678  * Return: status
9679  */
9680 static
9681 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9682 				struct cdp_config_params *params)
9683 {
9684 	struct dp_soc *soc = (struct dp_soc *)psoc;
9685 
9686 	if (!(soc)) {
9687 		dp_cdp_err("%pK: Invalid handle", soc);
9688 		return QDF_STATUS_E_INVAL;
9689 	}
9690 
9691 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9692 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9693 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9694 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
9695 				params->p2p_tcp_udp_checksumoffload;
9696 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
9697 				params->nan_tcp_udp_checksumoffload;
9698 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9699 				params->tcp_udp_checksumoffload;
9700 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9701 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9702 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9703 
9704 	dp_update_rx_soft_irq_limit_params(soc, params);
9705 	dp_update_flow_control_parameters(soc, params);
9706 
9707 	return QDF_STATUS_SUCCESS;
9708 }
9709 
/* WDS ops table registered with the CDP layer */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
9717 
9718 /*
9719  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
9720  * @soc_hdl - datapath soc handle
9721  * @vdev_id - virtual interface id
9722  * @callback - callback function
9723  * @ctxt: callback context
9724  *
9725  */
9726 static void
9727 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9728 		       ol_txrx_data_tx_cb callback, void *ctxt)
9729 {
9730 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9731 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9732 						     DP_MOD_ID_CDP);
9733 
9734 	if (!vdev)
9735 		return;
9736 
9737 	vdev->tx_non_std_data_callback.func = callback;
9738 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9739 
9740 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9741 }
9742 
9743 /**
9744  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9745  * @soc: datapath soc handle
9746  * @pdev_id: id of datapath pdev handle
9747  *
9748  * Return: opaque pointer to dp txrx handle
9749  */
9750 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9751 {
9752 	struct dp_pdev *pdev =
9753 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9754 						   pdev_id);
9755 	if (qdf_unlikely(!pdev))
9756 		return NULL;
9757 
9758 	return pdev->dp_txrx_handle;
9759 }
9760 
9761 /**
9762  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9763  * @soc: datapath soc handle
9764  * @pdev_id: id of datapath pdev handle
9765  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9766  *
9767  * Return: void
9768  */
9769 static void
9770 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9771 			   void *dp_txrx_hdl)
9772 {
9773 	struct dp_pdev *pdev =
9774 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9775 						   pdev_id);
9776 
9777 	if (!pdev)
9778 		return;
9779 
9780 	pdev->dp_txrx_handle = dp_txrx_hdl;
9781 }
9782 
9783 /**
9784  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9785  * @soc: datapath soc handle
9786  * @vdev_id: vdev id
9787  *
9788  * Return: opaque pointer to dp txrx handle
9789  */
9790 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
9791 				       uint8_t vdev_id)
9792 {
9793 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9794 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9795 						     DP_MOD_ID_CDP);
9796 	void *dp_ext_handle;
9797 
9798 	if (!vdev)
9799 		return NULL;
9800 	dp_ext_handle = vdev->vdev_dp_ext_handle;
9801 
9802 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9803 	return dp_ext_handle;
9804 }
9805 
9806 /**
9807  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9808  * @soc: datapath soc handle
9809  * @vdev_id: vdev id
9810  * @size: size of advance dp handle
9811  *
9812  * Return: QDF_STATUS
9813  */
9814 static QDF_STATUS
9815 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
9816 			  uint16_t size)
9817 {
9818 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9819 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9820 						     DP_MOD_ID_CDP);
9821 	void *dp_ext_handle;
9822 
9823 	if (!vdev)
9824 		return QDF_STATUS_E_FAILURE;
9825 
9826 	dp_ext_handle = qdf_mem_malloc(size);
9827 
9828 	if (!dp_ext_handle) {
9829 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9830 		return QDF_STATUS_E_FAILURE;
9831 	}
9832 
9833 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9834 
9835 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9836 	return QDF_STATUS_SUCCESS;
9837 }
9838 
9839 /**
9840  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
9841  *			      connection for this vdev
9842  * @soc_hdl: CDP soc handle
9843  * @vdev_id: vdev ID
9844  * @action: Add/Delete action
9845  *
9846  * Returns: QDF_STATUS.
9847  */
9848 static QDF_STATUS
9849 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9850 		       enum vdev_ll_conn_actions action)
9851 {
9852 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9853 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9854 						     DP_MOD_ID_CDP);
9855 
9856 	if (!vdev) {
9857 		dp_err("LL connection action for invalid vdev %d", vdev_id);
9858 		return QDF_STATUS_E_FAILURE;
9859 	}
9860 
9861 	switch (action) {
9862 	case CDP_VDEV_LL_CONN_ADD:
9863 		vdev->num_latency_critical_conn++;
9864 		break;
9865 
9866 	case CDP_VDEV_LL_CONN_DEL:
9867 		vdev->num_latency_critical_conn--;
9868 		break;
9869 
9870 	default:
9871 		dp_err("LL connection action invalid %d", action);
9872 		break;
9873 	}
9874 
9875 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9876 	return QDF_STATUS_SUCCESS;
9877 }
9878 
9879 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
9880 /**
9881  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
9882  * @soc_hdl: CDP Soc handle
9883  * @value: Enable/Disable value
9884  *
9885  * Returns: QDF_STATUS
9886  */
9887 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
9888 					 uint8_t value)
9889 {
9890 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9891 
9892 	if (!soc->swlm.is_init) {
9893 		dp_err("SWLM is not initialized");
9894 		return QDF_STATUS_E_FAILURE;
9895 	}
9896 
9897 	soc->swlm.is_enabled = !!value;
9898 
9899 	return QDF_STATUS_SUCCESS;
9900 }
9901 
9902 /**
9903  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
9904  * @soc_hdl: CDP Soc handle
9905  *
9906  * Returns: QDF_STATUS
9907  */
9908 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
9909 {
9910 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9911 
9912 	return soc->swlm.is_enabled;
9913 }
9914 #endif
9915 
9916 /**
9917  * dp_display_srng_info() - Dump the srng HP TP info
9918  * @soc_hdl: CDP Soc handle
9919  *
9920  * This function dumps the SW hp/tp values for the important rings.
9921  * HW hp/tp values are not being dumped, since it can lead to
9922  * READ NOC error when UMAC is in low power state. MCC does not have
9923  * device force wake working yet.
9924  *
9925  * Return: none
9926  */
9927 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
9928 {
9929 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9930 	hal_soc_handle_t hal_soc = soc->hal_soc;
9931 	uint32_t hp, tp, i;
9932 
9933 	dp_info("SRNG HP-TP data:");
9934 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
9935 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
9936 				&hp, &tp);
9937 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9938 
9939 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
9940 				&hp, &tp);
9941 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9942 	}
9943 
9944 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
9945 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
9946 				&hp, &tp);
9947 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9948 	}
9949 
9950 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &hp, &tp);
9951 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
9952 
9953 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &hp, &tp);
9954 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
9955 
9956 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &hp, &tp);
9957 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
9958 }
9959 
9960 /**
9961  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9962  * @soc_handle: datapath soc handle
9963  *
9964  * Return: opaque pointer to external dp (non-core DP)
9965  */
9966 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9967 {
9968 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9969 
9970 	return soc->external_txrx_handle;
9971 }
9972 
9973 /**
9974  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9975  * @soc_handle: datapath soc handle
9976  * @txrx_handle: opaque pointer to external dp (non-core DP)
9977  *
9978  * Return: void
9979  */
9980 static void
9981 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9982 {
9983 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9984 
9985 	soc->external_txrx_handle = txrx_handle;
9986 }
9987 
9988 /**
9989  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9990  * @soc_hdl: datapath soc handle
9991  * @pdev_id: id of the datapath pdev handle
9992  * @lmac_id: lmac id
9993  *
9994  * Return: QDF_STATUS
9995  */
9996 static QDF_STATUS
9997 dp_soc_map_pdev_to_lmac
9998 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9999 	 uint32_t lmac_id)
10000 {
10001 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10002 
10003 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
10004 				pdev_id,
10005 				lmac_id);
10006 
10007 	/*Set host PDEV ID for lmac_id*/
10008 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10009 			      pdev_id,
10010 			      lmac_id);
10011 
10012 	return QDF_STATUS_SUCCESS;
10013 }
10014 
10015 /**
10016  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
10017  * @soc_hdl: datapath soc handle
10018  * @pdev_id: id of the datapath pdev handle
10019  * @lmac_id: lmac id
10020  *
10021  * In the event of a dynamic mode change, update the pdev to lmac mapping
10022  *
10023  * Return: QDF_STATUS
10024  */
10025 static QDF_STATUS
10026 dp_soc_handle_pdev_mode_change
10027 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10028 	 uint32_t lmac_id)
10029 {
10030 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10031 	struct dp_vdev *vdev = NULL;
10032 	uint8_t hw_pdev_id, mac_id;
10033 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
10034 								  pdev_id);
10035 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
10036 
10037 	if (qdf_unlikely(!pdev))
10038 		return QDF_STATUS_E_FAILURE;
10039 
10040 	pdev->lmac_id = lmac_id;
10041 	pdev->target_pdev_id =
10042 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
10043 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
10044 
10045 	/*Set host PDEV ID for lmac_id*/
10046 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10047 			      pdev->pdev_id,
10048 			      lmac_id);
10049 
10050 	hw_pdev_id =
10051 		dp_get_target_pdev_id_for_host_pdev_id(soc,
10052 						       pdev->pdev_id);
10053 
10054 	/*
10055 	 * When NSS offload is enabled, send pdev_id->lmac_id
10056 	 * and pdev_id to hw_pdev_id to NSS FW
10057 	 */
10058 	if (nss_config) {
10059 		mac_id = pdev->lmac_id;
10060 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
10061 			soc->cdp_soc.ol_ops->
10062 				pdev_update_lmac_n_target_pdev_id(
10063 				soc->ctrl_psoc,
10064 				&pdev_id, &mac_id, &hw_pdev_id);
10065 	}
10066 
10067 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
10068 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
10069 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
10070 						hw_pdev_id);
10071 		vdev->lmac_id = pdev->lmac_id;
10072 	}
10073 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
10074 
10075 	return QDF_STATUS_SUCCESS;
10076 }
10077 
10078 /**
10079  * dp_soc_set_pdev_status_down() - set pdev down/up status
10080  * @soc: datapath soc handle
10081  * @pdev_id: id of datapath pdev handle
10082  * @is_pdev_down: pdev down/up status
10083  *
10084  * Return: QDF_STATUS
10085  */
10086 static QDF_STATUS
10087 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
10088 			    bool is_pdev_down)
10089 {
10090 	struct dp_pdev *pdev =
10091 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10092 						   pdev_id);
10093 	if (!pdev)
10094 		return QDF_STATUS_E_FAILURE;
10095 
10096 	pdev->is_pdev_down = is_pdev_down;
10097 	return QDF_STATUS_SUCCESS;
10098 }
10099 
10100 /**
10101  * dp_get_cfg_capabilities() - get dp capabilities
10102  * @soc_handle: datapath soc handle
10103  * @dp_caps: enum for dp capabilities
10104  *
10105  * Return: bool to determine if dp caps is enabled
10106  */
10107 static bool
10108 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
10109 			enum cdp_capabilities dp_caps)
10110 {
10111 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10112 
10113 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
10114 }
10115 
10116 #ifdef FEATURE_AST
10117 static QDF_STATUS
10118 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10119 		       uint8_t *peer_mac)
10120 {
10121 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10122 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10123 	struct dp_peer *peer =
10124 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
10125 					       DP_MOD_ID_CDP);
10126 
10127 	/* Peer can be null for monitor vap mac address */
10128 	if (!peer) {
10129 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
10130 			  "%s: Invalid peer\n", __func__);
10131 		return QDF_STATUS_E_FAILURE;
10132 	}
10133 
10134 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
10135 
10136 	qdf_spin_lock_bh(&soc->ast_lock);
10137 	dp_peer_delete_ast_entries(soc, peer);
10138 	qdf_spin_unlock_bh(&soc->ast_lock);
10139 
10140 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10141 	return status;
10142 }
10143 #endif
10144 
10145 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
10146 /**
10147  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
10148  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
10149  * @soc: cdp_soc handle
10150  * @pdev_id: id of cdp_pdev handle
10151  * @protocol_type: protocol type for which stats should be displayed
10152  *
10153  * Return: none
10154  */
10155 static inline void
10156 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
10157 				   uint16_t protocol_type)
10158 {
10159 }
10160 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
10161 
10162 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
10163 /**
10164  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
10165  * applied to the desired protocol type packets
10166  * @soc: soc handle
10167  * @pdev_id: id of cdp_pdev handle
10168  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
10169  * are enabled for tagging. zero indicates disable feature, non-zero indicates
10170  * enable feature
10171  * @protocol_type: new protocol type for which the tag is being added
10172  * @tag: user configured tag for the new protocol
10173  *
10174  * Return: Success
10175  */
10176 static inline QDF_STATUS
10177 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
10178 			       uint32_t enable_rx_protocol_tag,
10179 			       uint16_t protocol_type,
10180 			       uint16_t tag)
10181 {
10182 	return QDF_STATUS_SUCCESS;
10183 }
10184 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
10185 
10186 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
10187 /**
10188  * dp_set_rx_flow_tag - add/delete a flow
10189  * @soc: soc handle
10190  * @pdev_id: id of cdp_pdev handle
10191  * @flow_info: flow tuple that is to be added to/deleted from flow search table
10192  *
10193  * Return: Success
10194  */
10195 static inline QDF_STATUS
10196 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10197 		   struct cdp_rx_flow_info *flow_info)
10198 {
10199 	return QDF_STATUS_SUCCESS;
10200 }
10201 /**
10202  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
10203  * given flow 5-tuple
10204  * @cdp_soc: soc handle
10205  * @pdev_id: id of cdp_pdev handle
10206  * @flow_info: flow 5-tuple for which stats should be displayed
10207  *
10208  * Return: Success
10209  */
10210 static inline QDF_STATUS
10211 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10212 			  struct cdp_rx_flow_info *flow_info)
10213 {
10214 	return QDF_STATUS_SUCCESS;
10215 }
10216 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
10217 
10218 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
10219 					   uint32_t max_peers,
10220 					   uint32_t max_ast_index,
10221 					   bool peer_map_unmap_v2)
10222 {
10223 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10224 
10225 	soc->max_peers = max_peers;
10226 
10227 	qdf_print ("%s max_peers %u, max_ast_index: %u\n",
10228 		   __func__, max_peers, max_ast_index);
10229 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
10230 
10231 	if (dp_peer_find_attach(soc))
10232 		return QDF_STATUS_E_FAILURE;
10233 
10234 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
10235 	soc->peer_map_attach_success = TRUE;
10236 
10237 	return QDF_STATUS_SUCCESS;
10238 }
10239 
/**
 * dp_soc_set_param() - set a soc-level datapath parameter
 * @soc_hdl: datapath soc handle
 * @param: parameter to set, from enum cdp_soc_param_t
 * @value: value for the parameter
 *
 * Return: QDF_STATUS_SUCCESS (unknown parameters are logged and ignored)
 */
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu exception_desc %u",
			value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		/* honor FW capability only if the cfg also allows CMEM FSE */
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	case DP_SOC_PARAM_MAX_AST_AGEOUT:
		soc->max_ast_ageout_count = value;
		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
10268 
10269 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
10270 				      void *stats_ctx)
10271 {
10272 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10273 
10274 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
10275 }
10276 
10277 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10278 /**
10279  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
10280  * @soc: Datapath SOC handle
10281  * @peer: Datapath peer
10282  * @arg: argument to iter function
10283  *
10284  * Return: QDF_STATUS
10285  */
10286 static void
10287 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
10288 			     void *arg)
10289 {
10290 	if (peer->bss_peer)
10291 		return;
10292 
10293 	dp_wdi_event_handler(
10294 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
10295 		soc, peer->rdkstats_ctx,
10296 		peer->peer_id,
10297 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
10298 }
10299 
10300 /**
10301  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
10302  * @soc_hdl: Datapath SOC handle
10303  * @pdev_id: pdev_id
10304  *
10305  * Return: QDF_STATUS
10306  */
10307 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10308 					  uint8_t pdev_id)
10309 {
10310 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10311 	struct dp_pdev *pdev =
10312 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10313 						   pdev_id);
10314 	if (!pdev)
10315 		return QDF_STATUS_E_FAILURE;
10316 
10317 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
10318 			     DP_MOD_ID_CDP);
10319 
10320 	return QDF_STATUS_SUCCESS;
10321 }
10322 #else
/* Stub when per-packet info / WDI events are disabled: nothing to flush */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
10329 #endif
10330 
10331 static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
10332 				      uint8_t vdev_id,
10333 				      uint8_t *mac_addr)
10334 {
10335 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10336 	struct dp_peer *peer;
10337 	void *rdkstats_ctx = NULL;
10338 
10339 	if (mac_addr) {
10340 		peer = dp_peer_find_hash_find(soc, mac_addr,
10341 					      0, vdev_id,
10342 					      DP_MOD_ID_CDP);
10343 		if (!peer)
10344 			return NULL;
10345 
10346 		rdkstats_ctx = peer->rdkstats_ctx;
10347 
10348 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10349 	}
10350 
10351 	return rdkstats_ctx;
10352 }
10353 
10354 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10355 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10356 					   uint8_t pdev_id,
10357 					   void *buf)
10358 {
10359 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
10360 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
10361 			      WDI_NO_VAL, pdev_id);
10362 	return QDF_STATUS_SUCCESS;
10363 }
10364 #else
/* Stub when per-packet info / WDI events are disabled: nothing to flush */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
10372 #endif
10373 
10374 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
10375 {
10376 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10377 
10378 	return soc->rate_stats_ctx;
10379 }
10380 
10381 /*
10382  * dp_get_cfg() - get dp cfg
10383  * @soc: cdp soc handle
10384  * @cfg: cfg enum
10385  *
10386  * Return: cfg value
10387  */
10388 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
10389 {
10390 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
10391 	uint32_t value = 0;
10392 
10393 	switch (cfg) {
10394 	case cfg_dp_enable_data_stall:
10395 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
10396 		break;
10397 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
10398 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
10399 		break;
10400 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
10401 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
10402 		break;
10403 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
10404 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
10405 		break;
10406 	case cfg_dp_disable_legacy_mode_csum_offload:
10407 		value = dpsoc->wlan_cfg_ctx->
10408 					legacy_mode_checksumoffload_disable;
10409 		break;
10410 	case cfg_dp_tso_enable:
10411 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
10412 		break;
10413 	case cfg_dp_lro_enable:
10414 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
10415 		break;
10416 	case cfg_dp_gro_enable:
10417 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
10418 		break;
10419 	case cfg_dp_sg_enable:
10420 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
10421 		break;
10422 	case cfg_dp_tx_flow_start_queue_offset:
10423 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
10424 		break;
10425 	case cfg_dp_tx_flow_stop_queue_threshold:
10426 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
10427 		break;
10428 	case cfg_dp_disable_intra_bss_fwd:
10429 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
10430 		break;
10431 	case cfg_dp_pktlog_buffer_size:
10432 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
10433 		break;
10434 	case cfg_dp_wow_check_rx_pending:
10435 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
10436 		break;
10437 	default:
10438 		value =  0;
10439 	}
10440 
10441 	return value;
10442 }
10443 
10444 #ifdef PEER_FLOW_CONTROL
10445 /**
10446  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
10447  * @soc_handle: datapath soc handle
10448  * @pdev_id: id of datapath pdev handle
10449  * @param: ol ath params
10450  * @value: value of the flag
10451  * @buff: Buffer to be passed
10452  *
10453  * Implemented this function same as legacy function. In legacy code, single
10454  * function is used to display stats and update pdev params.
10455  *
10456  * Return: 0 for success. nonzero for failure.
10457  */
10458 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
10459 					       uint8_t pdev_id,
10460 					       enum _dp_param_t param,
10461 					       uint32_t value, void *buff)
10462 {
10463 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10464 	struct dp_pdev *pdev =
10465 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10466 						   pdev_id);
10467 
10468 	if (qdf_unlikely(!pdev))
10469 		return 1;
10470 
10471 	soc = pdev->soc;
10472 	if (!soc)
10473 		return 1;
10474 
10475 	switch (param) {
10476 #ifdef QCA_ENH_V3_STATS_SUPPORT
10477 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
10478 		if (value)
10479 			pdev->delay_stats_flag = true;
10480 		else
10481 			pdev->delay_stats_flag = false;
10482 		break;
10483 	case DP_PARAM_VIDEO_STATS_FC:
10484 		qdf_print("------- TID Stats ------\n");
10485 		dp_pdev_print_tid_stats(pdev);
10486 		qdf_print("------ Delay Stats ------\n");
10487 		dp_pdev_print_delay_stats(pdev);
10488 		qdf_print("------ Rx Error Stats ------\n");
10489 		dp_pdev_print_rx_error_stats(pdev);
10490 		break;
10491 #endif
10492 	case DP_PARAM_TOTAL_Q_SIZE:
10493 		{
10494 			uint32_t tx_min, tx_max;
10495 
10496 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
10497 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
10498 
10499 			if (!buff) {
10500 				if ((value >= tx_min) && (value <= tx_max)) {
10501 					pdev->num_tx_allowed = value;
10502 				} else {
10503 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
10504 						   soc, tx_min, tx_max);
10505 					break;
10506 				}
10507 			} else {
10508 				*(int *)buff = pdev->num_tx_allowed;
10509 			}
10510 		}
10511 		break;
10512 	default:
10513 		dp_tx_info("%pK: not handled param %d ", soc, param);
10514 		break;
10515 	}
10516 
10517 	return 0;
10518 }
10519 #endif
10520 
10521 /**
10522  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
10523  * @psoc: dp soc handle
10524  * @pdev_id: id of DP_PDEV handle
10525  * @pcp: pcp value
10526  * @tid: tid value passed by the user
10527  *
10528  * Return: QDF_STATUS_SUCCESS on success
10529  */
10530 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
10531 						uint8_t pdev_id,
10532 						uint8_t pcp, uint8_t tid)
10533 {
10534 	struct dp_soc *soc = (struct dp_soc *)psoc;
10535 
10536 	soc->pcp_tid_map[pcp] = tid;
10537 
10538 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
10539 	return QDF_STATUS_SUCCESS;
10540 }
10541 
10542 /**
10543  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
10544  * @soc: DP soc handle
10545  * @vdev_id: id of DP_VDEV handle
10546  * @pcp: pcp value
10547  * @tid: tid value passed by the user
10548  *
10549  * Return: QDF_STATUS_SUCCESS on success
10550  */
10551 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
10552 						uint8_t vdev_id,
10553 						uint8_t pcp, uint8_t tid)
10554 {
10555 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10556 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10557 						     DP_MOD_ID_CDP);
10558 
10559 	if (!vdev)
10560 		return QDF_STATUS_E_FAILURE;
10561 
10562 	vdev->pcp_tid_map[pcp] = tid;
10563 
10564 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10565 	return QDF_STATUS_SUCCESS;
10566 }
10567 
10568 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain pending TX completions and RX packets
 * @soc_handle: datapath soc handle
 *
 * Services every interrupt context once with a large budget to empty the
 * UMAC/LMAC SRNGs, then issues a dummy register read so that all pending
 * HP/TP register writes reach the hardware.
 *
 * Return: None
 */
static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;

	/* remember current reap limits so they can be restored */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		dp_service_srngs(&soc->intr_ctx[i], budget);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u\n", val);
}
10599 #endif
10600 
/* Common CDP ops: soc/pdev/vdev/peer lifecycle, AST management, stats and
 * configuration hooks exposed to the converged datapath interface.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
};
10704 
/* Control-plane CDP ops: per-soc/pdev/vdev/peer parameter get/set, WDI event
 * subscription, security keys and RX tagging hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
#ifdef WLAN_SUPPORT_SCS
	.txrx_enable_scs_params = dp_enable_scs_params,
	.txrx_record_scs_params = dp_record_scs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_delta_tsf = dp_set_delta_tsf,
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
};
10760 
/* Multicast-enhancement ops (host TX mode with IQUE support only) */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
10770 
/* Host statistics ops: fetch/publish/reset per-peer, vdev, pdev and soc
 * level stats.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	/* TODO */
};
10786 
/* Raw-mode ops: no handlers implemented yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
10790 
#ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; single positional member: pdev fc configuration */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
10796 
10797 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* CFR ops: RCC configuration and debug stats; filter and monitor reap
 * timer hooks are not implemented here.
 */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
	.txrx_enable_mon_reap_timer = NULL,
};
10806 #endif
10807 
10808 #ifdef WLAN_SUPPORT_MSCS
/* MSCS ops: peer lookup and priority retrieval */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
10812 #endif
10813 
10814 #ifdef WLAN_SUPPORT_MESH_LATENCY
/* Mesh latency ops: per-peer latency parameter update */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
10819 #endif
10820 
10821 #ifdef FEATURE_RUNTIME_PM
10822 /**
10823  * dp_flush_ring_hptp() - Update ring shadow
10824  *			  register HP/TP address when runtime
10825  *                        resume
10826  * @opaque_soc: DP soc context
10827  *
10828  * Return: None
10829  */
10830 static
10831 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10832 {
10833 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10834 						 HAL_SRNG_FLUSH_EVENT)) {
10835 		/* Acquire the lock */
10836 		hal_srng_access_start(soc->hal_soc, hal_srng);
10837 
10838 		hal_srng_access_end(soc->hal_soc, hal_srng);
10839 
10840 		hal_srng_set_flush_last_ts(hal_srng);
10841 		dp_debug("flushed");
10842 	}
10843 }
10844 
10845 /**
10846  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10847  * @soc_hdl: Datapath soc handle
10848  * @pdev_id: id of data path pdev handle
10849  *
10850  * DP is ready to runtime suspend if there are no pending TX packets.
10851  *
10852  * Return: QDF_STATUS
10853  */
10854 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10855 {
10856 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10857 	struct dp_pdev *pdev;
10858 	uint8_t i;
10859 
10860 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10861 	if (!pdev) {
10862 		dp_err("pdev is NULL");
10863 		return QDF_STATUS_E_INVAL;
10864 	}
10865 
10866 	/* Abort if there are any pending TX packets */
10867 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
10868 		dp_init_info("%pK: Abort suspend due to pending TX packets", soc);
10869 
10870 		/* perform a force flush if tx is pending */
10871 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
10872 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
10873 					   HAL_SRNG_FLUSH_EVENT);
10874 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10875 		}
10876 
10877 		return QDF_STATUS_E_AGAIN;
10878 	}
10879 
10880 	if (dp_runtime_get_refcount(soc)) {
10881 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
10882 
10883 		return QDF_STATUS_E_AGAIN;
10884 	}
10885 
10886 	if (soc->intr_mode == DP_INTR_POLL)
10887 		qdf_timer_stop(&soc->int_timer);
10888 
10889 	dp_rx_fst_update_pm_suspend_status(soc, true);
10890 
10891 	return QDF_STATUS_SUCCESS;
10892 }
10893 
#define DP_FLUSH_WAIT_CNT 10
#define DP_RUNTIME_SUSPEND_WAIT_MS 10
/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM: restart the poll timer (if used), wait for
 * the DP runtime refcount to drain (bounded wait), then flush the HP/TP
 * shadow registers of the TCL data rings and the REO cmd ring.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int i, suspend_wait = 0;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/*
	 * Wait until dp runtime refcount becomes zero or time out, then flush
	 * pending tx for runtime suspend.
	 */
	while (dp_runtime_get_refcount(soc) &&
	       suspend_wait < DP_FLUSH_WAIT_CNT) {
		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
		suspend_wait++;
	}

	/*
	 * NOTE(review): this iterates MAX_TCL_DATA_RINGS while the suspend
	 * path only touches num_tcl_data_rings; dp_flush_ring_hptp() skips
	 * NULL srngs, but confirm the asymmetry is intended.
	 */
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}

	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
	dp_rx_fst_update_pm_suspend_status(soc, false);

	return QDF_STATUS_SUCCESS;
}
10932 #endif /* FEATURE_RUNTIME_PM */
10933 
10934 /**
10935  * dp_tx_get_success_ack_stats() - get tx success completion count
10936  * @soc_hdl: Datapath soc handle
10937  * @vdevid: vdev identifier
10938  *
10939  * Return: tx success ack count
10940  */
10941 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10942 					    uint8_t vdev_id)
10943 {
10944 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10945 	struct cdp_vdev_stats *vdev_stats = NULL;
10946 	uint32_t tx_success;
10947 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10948 						     DP_MOD_ID_CDP);
10949 
10950 	if (!vdev) {
10951 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
10952 		return 0;
10953 	}
10954 
10955 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
10956 	if (!vdev_stats) {
10957 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
10958 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10959 		return 0;
10960 	}
10961 
10962 	dp_aggregate_vdev_stats(vdev, vdev_stats);
10963 
10964 	tx_success = vdev_stats->tx.tx_success.num;
10965 	qdf_mem_free(vdev_stats);
10966 
10967 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10968 	return tx_success;
10969 }
10970 
10971 #ifdef WLAN_SUPPORT_DATA_STALL
10972 /**
10973  * dp_register_data_stall_detect_cb() - register data stall callback
10974  * @soc_hdl: Datapath soc handle
10975  * @pdev_id: id of data path pdev handle
10976  * @data_stall_detect_callback: data stall callback function
10977  *
10978  * Return: QDF_STATUS Enumeration
10979  */
10980 static
10981 QDF_STATUS dp_register_data_stall_detect_cb(
10982 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10983 			data_stall_detect_cb data_stall_detect_callback)
10984 {
10985 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10986 	struct dp_pdev *pdev;
10987 
10988 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10989 	if (!pdev) {
10990 		dp_err("pdev NULL!");
10991 		return QDF_STATUS_E_INVAL;
10992 	}
10993 
10994 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10995 	return QDF_STATUS_SUCCESS;
10996 }
10997 
10998 /**
10999  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
11000  * @soc_hdl: Datapath soc handle
11001  * @pdev_id: id of data path pdev handle
11002  * @data_stall_detect_callback: data stall callback function
11003  *
11004  * Return: QDF_STATUS Enumeration
11005  */
11006 static
11007 QDF_STATUS dp_deregister_data_stall_detect_cb(
11008 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11009 			data_stall_detect_cb data_stall_detect_callback)
11010 {
11011 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11012 	struct dp_pdev *pdev;
11013 
11014 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11015 	if (!pdev) {
11016 		dp_err("pdev NULL!");
11017 		return QDF_STATUS_E_INVAL;
11018 	}
11019 
11020 	pdev->data_stall_detect_callback = NULL;
11021 	return QDF_STATUS_SUCCESS;
11022 }
11023 
11024 /**
11025  * dp_txrx_post_data_stall_event() - post data stall event
11026  * @soc_hdl: Datapath soc handle
11027  * @indicator: Module triggering data stall
11028  * @data_stall_type: data stall event type
11029  * @pdev_id: pdev id
11030  * @vdev_id_bitmap: vdev id bitmap
11031  * @recovery_type: data stall recovery type
11032  *
11033  * Return: None
11034  */
11035 static void
11036 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
11037 			      enum data_stall_log_event_indicator indicator,
11038 			      enum data_stall_log_event_type data_stall_type,
11039 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
11040 			      enum data_stall_log_recovery_type recovery_type)
11041 {
11042 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11043 	struct data_stall_event_info data_stall_info;
11044 	struct dp_pdev *pdev;
11045 
11046 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11047 	if (!pdev) {
11048 		dp_err("pdev NULL!");
11049 		return;
11050 	}
11051 
11052 	if (!pdev->data_stall_detect_callback) {
11053 		dp_err("data stall cb not registered!");
11054 		return;
11055 	}
11056 
11057 	dp_info("data_stall_type: %x pdev_id: %d",
11058 		data_stall_type, pdev_id);
11059 
11060 	data_stall_info.indicator = indicator;
11061 	data_stall_info.data_stall_type = data_stall_type;
11062 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
11063 	data_stall_info.pdev_id = pdev_id;
11064 	data_stall_info.recovery_type = recovery_type;
11065 
11066 	pdev->data_stall_detect_callback(&data_stall_info);
11067 }
11068 #endif /* WLAN_SUPPORT_DATA_STALL */
11069 
11070 #ifdef WLAN_FEATURE_STATS_EXT
11071 /* rx hw stats event wait timeout in ms */
11072 #define DP_REO_STATUS_STATS_TIMEOUT 1500
11073 /**
11074  * dp_txrx_ext_stats_request - request dp txrx extended stats request
11075  * @soc_hdl: soc handle
11076  * @pdev_id: pdev id
11077  * @req: stats request
11078  *
11079  * Return: QDF_STATUS
11080  */
11081 static QDF_STATUS
11082 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11083 			  struct cdp_txrx_ext_stats *req)
11084 {
11085 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11086 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11087 
11088 	if (!pdev) {
11089 		dp_err("pdev is null");
11090 		return QDF_STATUS_E_INVAL;
11091 	}
11092 
11093 	dp_aggregate_pdev_stats(pdev);
11094 
11095 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
11096 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
11097 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
11098 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
11099 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
11100 	/* only count error source from RXDMA */
11101 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
11102 
11103 	return QDF_STATUS_SUCCESS;
11104 }
11105 
11106 /**
11107  * dp_rx_hw_stats_cb - request rx hw stats response callback
11108  * @soc: soc handle
11109  * @cb_ctxt: callback context
11110  * @reo_status: reo command response status
11111  *
11112  * Return: None
11113  */
11114 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
11115 			      union hal_reo_status *reo_status)
11116 {
11117 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
11118 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
11119 	bool is_query_timeout;
11120 
11121 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11122 	is_query_timeout = rx_hw_stats->is_query_timeout;
11123 	/* free the cb_ctxt if all pending tid stats query is received */
11124 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
11125 		if (!is_query_timeout) {
11126 			qdf_event_set(&soc->rx_hw_stats_event);
11127 			soc->is_last_stats_ctx_init = false;
11128 		}
11129 
11130 		qdf_mem_free(rx_hw_stats);
11131 	}
11132 
11133 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
11134 		dp_info("REO stats failure %d",
11135 			queue_status->header.status);
11136 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11137 		return;
11138 	}
11139 
11140 	if (!is_query_timeout) {
11141 		soc->ext_stats.rx_mpdu_received +=
11142 					queue_status->mpdu_frms_cnt;
11143 		soc->ext_stats.rx_mpdu_missed +=
11144 					queue_status->hole_cnt;
11145 	}
11146 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11147 }
11148 
11149 /**
11150  * dp_request_rx_hw_stats - request rx hardware stats
11151  * @soc_hdl: soc handle
11152  * @vdev_id: vdev id
11153  *
11154  * Return: None
11155  */
11156 static QDF_STATUS
11157 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
11158 {
11159 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11160 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11161 						     DP_MOD_ID_CDP);
11162 	struct dp_peer *peer = NULL;
11163 	QDF_STATUS status;
11164 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
11165 	int rx_stats_sent_cnt = 0;
11166 	uint32_t last_rx_mpdu_received;
11167 	uint32_t last_rx_mpdu_missed;
11168 
11169 	if (!vdev) {
11170 		dp_err("vdev is null for vdev_id: %u", vdev_id);
11171 		status = QDF_STATUS_E_INVAL;
11172 		goto out;
11173 	}
11174 
11175 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
11176 
11177 	if (!peer) {
11178 		dp_err("Peer is NULL");
11179 		status = QDF_STATUS_E_INVAL;
11180 		goto out;
11181 	}
11182 
11183 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
11184 
11185 	if (!rx_hw_stats) {
11186 		dp_err("malloc failed for hw stats structure");
11187 		status = QDF_STATUS_E_INVAL;
11188 		goto out;
11189 	}
11190 
11191 	qdf_event_reset(&soc->rx_hw_stats_event);
11192 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11193 	/* save the last soc cumulative stats and reset it to 0 */
11194 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
11195 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
11196 	soc->ext_stats.rx_mpdu_received = 0;
11197 	soc->ext_stats.rx_mpdu_missed = 0;
11198 
11199 	rx_stats_sent_cnt =
11200 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
11201 	if (!rx_stats_sent_cnt) {
11202 		dp_err("no tid stats sent successfully");
11203 		qdf_mem_free(rx_hw_stats);
11204 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11205 		status = QDF_STATUS_E_INVAL;
11206 		goto out;
11207 	}
11208 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
11209 		       rx_stats_sent_cnt);
11210 	rx_hw_stats->is_query_timeout = false;
11211 	soc->is_last_stats_ctx_init = true;
11212 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11213 
11214 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
11215 				       DP_REO_STATUS_STATS_TIMEOUT);
11216 
11217 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11218 	if (status != QDF_STATUS_SUCCESS) {
11219 		dp_info("rx hw stats event timeout");
11220 		if (soc->is_last_stats_ctx_init)
11221 			rx_hw_stats->is_query_timeout = true;
11222 		/**
11223 		 * If query timeout happened, use the last saved stats
11224 		 * for this time query.
11225 		 */
11226 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
11227 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
11228 	}
11229 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11230 
11231 out:
11232 	if (peer)
11233 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11234 	if (vdev)
11235 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11236 
11237 	return status;
11238 }
11239 
11240 /**
11241  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
11242  * @soc_hdl: soc handle
11243  *
11244  * Return: None
11245  */
11246 static
11247 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
11248 {
11249 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11250 
11251 	soc->ext_stats.rx_mpdu_received = 0;
11252 	soc->ext_stats.rx_mpdu_missed = 0;
11253 }
11254 #endif /* WLAN_FEATURE_STATS_EXT */
11255 
11256 #ifdef DP_PEER_EXTENDED_API
/* Miscellaneous cdp ops exported through dp_txrx_ops.misc_ops */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
};
11286 #endif
11287 
11288 #ifdef DP_FLOW_CTL
/* Tx flow control (v2) ops; empty unless flow control v2 is compiled in */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
11299 
/* Legacy flow control ops; intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
11303 #endif
11304 
11305 #ifdef IPA_OFFLOAD
/* IPA offload ops exported through dp_txrx_ops.ipa_ops */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping
};
11327 #endif
11328 
11329 #ifdef DP_POWER_SAVE
11330 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11331 {
11332 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11333 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11334 	int timeout = SUSPEND_DRAIN_WAIT;
11335 	int drain_wait_delay = 50; /* 50 ms */
11336 
11337 	if (qdf_unlikely(!pdev)) {
11338 		dp_err("pdev is NULL");
11339 		return QDF_STATUS_E_INVAL;
11340 	}
11341 
11342 	/* Abort if there are any pending TX packets */
11343 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
11344 		qdf_sleep(drain_wait_delay);
11345 		if (timeout <= 0) {
11346 			dp_err("TX frames are pending, abort suspend");
11347 			return QDF_STATUS_E_TIMEOUT;
11348 		}
11349 		timeout = timeout - drain_wait_delay;
11350 	}
11351 
11352 	if (soc->intr_mode == DP_INTR_POLL)
11353 		qdf_timer_stop(&soc->int_timer);
11354 
11355 	/* Stop monitor reap timer and reap any pending frames in ring */
11356 	dp_monitor_pktlog_reap_pending_frames(pdev);
11357 
11358 	dp_suspend_fse_cache_flush(soc);
11359 
11360 	return QDF_STATUS_SUCCESS;
11361 }
11362 
11363 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11364 {
11365 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11366 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11367 
11368 	if (qdf_unlikely(!pdev)) {
11369 		dp_err("pdev is NULL");
11370 		return QDF_STATUS_E_INVAL;
11371 	}
11372 
11373 	if (soc->intr_mode == DP_INTR_POLL)
11374 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
11375 
11376 	/* Start monitor reap timer */
11377 	dp_monitor_pktlog_start_reap_timer(pdev);
11378 
11379 	dp_resume_fse_cache_flush(soc);
11380 
11381 	return QDF_STATUS_SUCCESS;
11382 }
11383 
11384 /**
11385  * dp_process_wow_ack_rsp() - process wow ack response
11386  * @soc_hdl: datapath soc handle
11387  * @pdev_id: data path pdev handle id
11388  *
11389  * Return: none
11390  */
11391 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11392 {
11393 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11394 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11395 
11396 	if (qdf_unlikely(!pdev)) {
11397 		dp_err("pdev is NULL");
11398 		return;
11399 	}
11400 
11401 	/*
11402 	 * As part of wow enable FW disables the mon status ring and in wow ack
11403 	 * response from FW reap mon status ring to make sure no packets pending
11404 	 * in the ring.
11405 	 */
11406 	dp_monitor_pktlog_reap_pending_frames(pdev);
11407 }
11408 
11409 /**
11410  * dp_process_target_suspend_req() - process target suspend request
11411  * @soc_hdl: datapath soc handle
11412  * @pdev_id: data path pdev handle id
11413  *
11414  * Return: none
11415  */
11416 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
11417 					  uint8_t pdev_id)
11418 {
11419 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11420 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11421 
11422 	if (qdf_unlikely(!pdev)) {
11423 		dp_err("pdev is NULL");
11424 		return;
11425 	}
11426 
11427 	/* Stop monitor reap timer and reap any pending frames in ring */
11428 	dp_monitor_pktlog_reap_pending_frames(pdev);
11429 }
11430 
/* Bus suspend/resume ops exported through dp_txrx_ops.bus_ops */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
11437 #endif
11438 
11439 #ifdef DP_FLOW_CTL
/* Throttle ops; intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
11443 
/* Config ops; intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
11447 #endif
11448 
11449 #ifdef DP_PEER_EXTENDED_API
/* OCB ops; intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
11453 
/* Mobile stats ops exported through dp_txrx_ops.mob_stats_ops */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
11457 
/* Peer management ops exported through dp_txrx_ops.peer_ops */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
};
11471 #endif
11472 
/* Top-level cdp ops table installed on the soc in dp_soc_attach() */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef PEER_FLOW_CONTROL
	.pflow_ops = &dp_ops_pflow,
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API
	.misc_ops = &dp_ops_misc,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
#ifdef DP_FLOW_CTL
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.throttle_ops = &dp_ops_throttle,
#endif
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
#ifdef DP_POWER_SAVE
	.bus_ops = &dp_ops_bus,
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	.cfr_ops = &dp_ops_cfr,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.mscs_ops = &dp_ops_mscs,
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	.mesh_latency_ops = &dp_ops_mesh_latency,
#endif
};
11511 
11512 /*
11513  * dp_soc_set_txrx_ring_map()
11514  * @dp_soc: DP handler for soc
11515  *
11516  * Return: Void
11517  */
11518 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
11519 {
11520 	uint32_t i;
11521 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
11522 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
11523 	}
11524 }
11525 
11526 qdf_export_symbol(dp_soc_set_txrx_ring_map);
11527 
11528 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
11529 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
11530 /**
11531  * dp_soc_attach_wifi3() - Attach txrx SOC
11532  * @ctrl_psoc: Opaque SOC handle from control plane
11533  * @htc_handle: Opaque HTC handle
11534  * @hif_handle: Opaque HIF handle
11535  * @qdf_osdev: QDF device
11536  * @ol_ops: Offload Operations
11537  * @device_id: Device ID
11538  *
11539  * Return: DP SOC handle on success, NULL on failure
11540  */
11541 struct cdp_soc_t *
11542 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11543 		    struct hif_opaque_softc *hif_handle,
11544 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11545 		    struct ol_if_ops *ol_ops, uint16_t device_id)
11546 {
11547 	struct dp_soc *dp_soc = NULL;
11548 
11549 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
11550 			       ol_ops, device_id);
11551 	return dp_soc_to_cdp_soc_t(dp_soc);
11552 }
11553 
11554 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
11555 {
11556 	int lmac_id;
11557 
11558 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
11559 		/*Set default host PDEV ID for lmac_id*/
11560 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11561 				      INVALID_PDEV_ID, lmac_id);
11562 	}
11563 }
11564 
11565 static uint32_t
11566 dp_get_link_desc_id_start(uint16_t arch_id)
11567 {
11568 	switch (arch_id) {
11569 	case CDP_ARCH_TYPE_LI:
11570 		return LINK_DESC_ID_START_21_BITS_COOKIE;
11571 	case CDP_ARCH_TYPE_BE:
11572 		return LINK_DESC_ID_START_20_BITS_COOKIE;
11573 	default:
11574 		dp_err("unkonwn arch_id 0x%x", arch_id);
11575 		QDF_BUG(0);
11576 		return LINK_DESC_ID_START_21_BITS_COOKIE;
11577 	}
11578 }
11579 
11580 /**
11581  * dp_soc_attach() - Attach txrx SOC
11582  * @ctrl_psoc: Opaque SOC handle from control plane
11583  * @hif_handle: Opaque HIF handle
11584  * @htc_handle: Opaque HTC handle
11585  * @qdf_osdev: QDF device
11586  * @ol_ops: Offload Operations
11587  * @device_id: Device ID
11588  *
11589  * Return: DP SOC handle on success, NULL on failure
11590  */
11591 static struct dp_soc *
11592 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11593 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
11594 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
11595 	      uint16_t device_id)
11596 {
11597 	int int_ctx;
11598 	struct dp_soc *soc =  NULL;
11599 	uint16_t arch_id;
11600 
11601 	if (!hif_handle) {
11602 		dp_err("HIF handle is NULL");
11603 		goto fail0;
11604 	}
11605 	arch_id = cdp_get_arch_type_from_devid(device_id);
11606 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
11607 	if (!soc) {
11608 		dp_err("DP SOC memory allocation failed");
11609 		goto fail0;
11610 	}
11611 
11612 	dp_info("soc memory allocated %pk", soc);
11613 	soc->hif_handle = hif_handle;
11614 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11615 	if (!soc->hal_soc)
11616 		goto fail1;
11617 
11618 	hif_get_cmem_info(soc->hif_handle,
11619 			  &soc->cmem_base,
11620 			  &soc->cmem_size);
11621 	int_ctx = 0;
11622 	soc->device_id = device_id;
11623 	soc->cdp_soc.ops = &dp_txrx_ops;
11624 	soc->cdp_soc.ol_ops = ol_ops;
11625 	soc->ctrl_psoc = ctrl_psoc;
11626 	soc->osdev = qdf_osdev;
11627 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
11628 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
11629 			    &soc->rx_mon_pkt_tlv_size);
11630 
11631 	soc->arch_id = arch_id;
11632 	soc->link_desc_id_start =
11633 			dp_get_link_desc_id_start(soc->arch_id);
11634 	dp_configure_arch_ops(soc);
11635 
11636 	/* Reset wbm sg list and flags */
11637 	dp_rx_wbm_sg_list_reset(soc);
11638 
11639 	dp_soc_tx_hw_desc_history_attach(soc);
11640 	dp_soc_rx_history_attach(soc);
11641 	dp_soc_tx_history_attach(soc);
11642 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
11643 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
11644 	if (!soc->wlan_cfg_ctx) {
11645 		dp_err("wlan_cfg_ctx failed\n");
11646 		goto fail1;
11647 	}
11648 
11649 	dp_soc_cfg_attach(soc);
11650 
11651 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
11652 		dp_err("failed to allocate link desc pool banks");
11653 		goto fail2;
11654 	}
11655 
11656 	if (dp_hw_link_desc_ring_alloc(soc)) {
11657 		dp_err("failed to allocate link_desc_ring");
11658 		goto fail3;
11659 	}
11660 
11661 	if (dp_soc_srng_alloc(soc)) {
11662 		dp_err("failed to allocate soc srng rings");
11663 		goto fail4;
11664 	}
11665 
11666 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
11667 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
11668 		goto fail5;
11669 	}
11670 
11671 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc))) {
11672 		dp_err("unable to do target specific attach");
11673 		goto fail6;
11674 	}
11675 
11676 	if (!dp_monitor_modularized_enable()) {
11677 		if (dp_mon_soc_attach_wrapper(soc)) {
11678 			dp_err("failed to attach monitor");
11679 			goto fail6;
11680 		}
11681 	}
11682 
11683 	dp_soc_swlm_attach(soc);
11684 	dp_soc_set_interrupt_mode(soc);
11685 	dp_soc_set_def_pdev(soc);
11686 
11687 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11688 		qdf_dma_mem_stats_read(),
11689 		qdf_heap_mem_stats_read(),
11690 		qdf_skb_total_mem_stats_read());
11691 
11692 	return soc;
11693 fail6:
11694 	dp_soc_tx_desc_sw_pools_free(soc);
11695 fail5:
11696 	dp_soc_srng_free(soc);
11697 fail4:
11698 	dp_hw_link_desc_ring_free(soc);
11699 fail3:
11700 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
11701 fail2:
11702 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
11703 fail1:
11704 	qdf_mem_free(soc);
11705 fail0:
11706 	return NULL;
11707 }
11708 
11709 /**
11710  * dp_soc_init() - Initialize txrx SOC
11711  * @dp_soc: Opaque DP SOC handle
11712  * @htc_handle: Opaque HTC handle
11713  * @hif_handle: Opaque HIF handle
11714  *
11715  * Return: DP SOC handle on success, NULL on failure
11716  */
11717 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
11718 		  struct hif_opaque_softc *hif_handle)
11719 {
11720 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
11721 	bool is_monitor_mode = false;
11722 	struct hal_reo_params reo_params;
11723 	uint8_t i;
11724 	int num_dp_msi;
11725 
11726 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
11727 			  WLAN_MD_DP_SOC, "dp_soc");
11728 
11729 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
11730 		dp_err("unable to do target specific init");
11731 		goto fail0;
11732 	}
11733 
11734 	htt_soc = htt_soc_attach(soc, htc_handle);
11735 	if (!htt_soc)
11736 		goto fail1;
11737 
11738 	soc->htt_handle = htt_soc;
11739 
11740 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
11741 		goto fail2;
11742 
11743 	htt_set_htc_handle(htt_soc, htc_handle);
11744 	soc->hif_handle = hif_handle;
11745 
11746 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11747 	if (!soc->hal_soc)
11748 		goto fail3;
11749 
11750 	dp_soc_cfg_init(soc);
11751 
11752 	dp_monitor_soc_cfg_init(soc);
11753 	/* Reset/Initialize wbm sg list and flags */
11754 	dp_rx_wbm_sg_list_reset(soc);
11755 
11756 	/* Note: Any SRNG ring initialization should happen only after
11757 	 * Interrupt mode is set and followed by filling up the
11758 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
11759 	 */
11760 	dp_soc_set_interrupt_mode(soc);
11761 	if (soc->cdp_soc.ol_ops->get_con_mode &&
11762 	    soc->cdp_soc.ol_ops->get_con_mode() ==
11763 	    QDF_GLOBAL_MONITOR_MODE)
11764 		is_monitor_mode = true;
11765 
11766 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
11767 	if (num_dp_msi < 0) {
11768 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
11769 		goto fail4;
11770 	}
11771 
11772 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
11773 				     soc->intr_mode, is_monitor_mode);
11774 
11775 	/* initialize WBM_IDLE_LINK ring */
11776 	if (dp_hw_link_desc_ring_init(soc)) {
11777 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
11778 		goto fail4;
11779 	}
11780 
11781 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
11782 
11783 	if (dp_soc_srng_init(soc)) {
11784 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
11785 		goto fail5;
11786 	}
11787 
11788 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
11789 			       htt_get_htc_handle(htt_soc),
11790 			       soc->hal_soc, soc->osdev) == NULL)
11791 		goto fail6;
11792 
11793 	/* Initialize descriptors in TCL Rings */
11794 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11795 		hal_tx_init_data_ring(soc->hal_soc,
11796 				      soc->tcl_data_ring[i].hal_srng);
11797 	}
11798 
11799 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
11800 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
11801 		goto fail7;
11802 	}
11803 
11804 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
11805 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
11806 	soc->cce_disable = false;
11807 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
11808 
11809 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
11810 	qdf_spinlock_create(&soc->vdev_map_lock);
11811 	qdf_atomic_init(&soc->num_tx_outstanding);
11812 	qdf_atomic_init(&soc->num_tx_exception);
11813 	soc->num_tx_allowed =
11814 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
11815 
11816 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
11817 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11818 				CDP_CFG_MAX_PEER_ID);
11819 
11820 		if (ret != -EINVAL)
11821 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
11822 
11823 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11824 				CDP_CFG_CCE_DISABLE);
11825 		if (ret == 1)
11826 			soc->cce_disable = true;
11827 	}
11828 
11829 	/*
11830 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
11831 	 * and IPQ5018 WMAC2 is not there in these platforms.
11832 	 */
11833 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
11834 	    soc->disable_mac2_intr)
11835 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
11836 
11837 	/*
11838 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
11839 	 * WMAC1 is not there in this platform.
11840 	 */
11841 	if (soc->disable_mac1_intr)
11842 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
11843 
11844 	/* Setup HW REO */
11845 	qdf_mem_zero(&reo_params, sizeof(reo_params));
11846 
11847 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
11848 		/*
11849 		 * Reo ring remap is not required if both radios
11850 		 * are offloaded to NSS
11851 		 */
11852 		if (dp_reo_remap_config(soc,
11853 					&reo_params.remap1,
11854 					&reo_params.remap2))
11855 			reo_params.rx_hash_enabled = true;
11856 		else
11857 			reo_params.rx_hash_enabled = false;
11858 	}
11859 
11860 	/* setup the global rx defrag waitlist */
11861 	TAILQ_INIT(&soc->rx.defrag.waitlist);
11862 	soc->rx.defrag.timeout_ms =
11863 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
11864 	soc->rx.defrag.next_flush_ms = 0;
11865 	soc->rx.flags.defrag_timeout_check =
11866 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
11867 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
11868 
11869 	/*
11870 	 * set the fragment destination ring
11871 	 */
11872 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
11873 
11874 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
11875 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
11876 
11877 	hal_reo_setup(soc->hal_soc, &reo_params);
11878 
11879 	hal_reo_set_err_dst_remap(soc->hal_soc);
11880 
11881 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
11882 
11883 	qdf_atomic_set(&soc->cmn_init_done, 1);
11884 
11885 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
11886 
11887 	qdf_spinlock_create(&soc->ast_lock);
11888 	dp_peer_mec_spinlock_create(soc);
11889 
11890 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
11891 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
11892 	INIT_RX_HW_STATS_LOCK(soc);
11893 
11894 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
11895 	/* fill the tx/rx cpu ring map*/
11896 	dp_soc_set_txrx_ring_map(soc);
11897 
11898 	TAILQ_INIT(&soc->inactive_peer_list);
11899 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
11900 	TAILQ_INIT(&soc->inactive_vdev_list);
11901 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
11902 	qdf_spinlock_create(&soc->htt_stats.lock);
11903 	/* initialize work queue for stats processing */
11904 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
11905 
11906 	dp_reo_desc_deferred_freelist_create(soc);
11907 
11908 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11909 		qdf_dma_mem_stats_read(),
11910 		qdf_heap_mem_stats_read(),
11911 		qdf_skb_total_mem_stats_read());
11912 
11913 	return soc;
11914 fail7:
11915 	htt_soc_htc_dealloc(soc->htt_handle);
11916 fail6:
11917 	dp_soc_srng_deinit(soc);
11918 fail5:
11919 	dp_hw_link_desc_ring_deinit(soc);
11920 fail4:
11921 	dp_hw_link_desc_ring_free(soc);
11922 fail3:
11923 	htt_htc_pkt_pool_free(htt_soc);
11924 fail2:
11925 	htt_soc_detach(htt_soc);
11926 fail1:
11927 	soc->arch_ops.txrx_soc_deinit(soc);
11928 fail0:
11929 	return NULL;
11930 }
11931 
11932 /**
11933  * dp_soc_init_wifi3() - Initialize txrx SOC
11934  * @soc: Opaque DP SOC handle
11935  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
11936  * @hif_handle: Opaque HIF handle
11937  * @htc_handle: Opaque HTC handle
11938  * @qdf_osdev: QDF device (Unused)
11939  * @ol_ops: Offload Operations (Unused)
11940  * @device_id: Device ID (Unused)
11941  *
11942  * Return: DP SOC handle on success, NULL on failure
11943  */
11944 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
11945 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11946 			struct hif_opaque_softc *hif_handle,
11947 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11948 			struct ol_if_ops *ol_ops, uint16_t device_id)
11949 {
11950 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
11951 }
11952 
11953 #endif
11954 
11955 /*
11956  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
11957  *
11958  * @soc: handle to DP soc
11959  * @mac_id: MAC id
11960  *
11961  * Return: Return pdev corresponding to MAC
11962  */
11963 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
11964 {
11965 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
11966 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
11967 
11968 	/* Typically for MCL as there only 1 PDEV*/
11969 	return soc->pdev_list[0];
11970 }
11971 
11972 /*
11973  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
11974  * @soc:		DP SoC context
11975  * @max_mac_rings:	No of MAC rings
11976  *
11977  * Return: None
11978  */
11979 void dp_is_hw_dbs_enable(struct dp_soc *soc,
11980 				int *max_mac_rings)
11981 {
11982 	bool dbs_enable = false;
11983 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
11984 		dbs_enable = soc->cdp_soc.ol_ops->
11985 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
11986 
11987 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
11988 }
11989 
11990 qdf_export_symbol(dp_is_hw_dbs_enable);
11991 
11992 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11993 /**
11994  * dp_get_cfr_rcc() - get cfr rcc config
11995  * @soc_hdl: Datapath soc handle
11996  * @pdev_id: id of objmgr pdev
11997  *
11998  * Return: true/false based on cfr mode setting
11999  */
12000 static
12001 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12002 {
12003 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12004 	struct dp_pdev *pdev = NULL;
12005 
12006 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12007 	if (!pdev) {
12008 		dp_err("pdev is NULL");
12009 		return false;
12010 	}
12011 
12012 	return pdev->cfr_rcc_mode;
12013 }
12014 
12015 /**
12016  * dp_set_cfr_rcc() - enable/disable cfr rcc config
12017  * @soc_hdl: Datapath soc handle
12018  * @pdev_id: id of objmgr pdev
12019  * @enable: Enable/Disable cfr rcc mode
12020  *
12021  * Return: none
12022  */
12023 static
12024 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
12025 {
12026 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12027 	struct dp_pdev *pdev = NULL;
12028 
12029 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12030 	if (!pdev) {
12031 		dp_err("pdev is NULL");
12032 		return;
12033 	}
12034 
12035 	pdev->cfr_rcc_mode = enable;
12036 }
12037 
12038 /*
12039  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
12040  * @soc_hdl: Datapath soc handle
12041  * @pdev_id: id of data path pdev handle
12042  * @cfr_rcc_stats: CFR RCC debug statistics buffer
12043  *
12044  * Return: none
12045  */
12046 static inline void
12047 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12048 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
12049 {
12050 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12051 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12052 
12053 	if (!pdev) {
12054 		dp_err("Invalid pdev");
12055 		return;
12056 	}
12057 
12058 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
12059 		     sizeof(struct cdp_cfr_rcc_stats));
12060 }
12061 
12062 /*
12063  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
12064  * @soc_hdl: Datapath soc handle
12065  * @pdev_id: id of data path pdev handle
12066  *
12067  * Return: none
12068  */
12069 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
12070 				   uint8_t pdev_id)
12071 {
12072 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12073 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12074 
12075 	if (!pdev) {
12076 		dp_err("dp pdev is NULL");
12077 		return;
12078 	}
12079 
12080 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
12081 }
12082 #endif
12083 
12084 /**
12085  * dp_bucket_index() - Return index from array
12086  *
12087  * @delay: delay measured
12088  * @array: array used to index corresponding delay
12089  *
12090  * Return: index
12091  */
12092 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
12093 {
12094 	uint8_t i = CDP_DELAY_BUCKET_0;
12095 
12096 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
12097 		if (delay >= array[i] && delay <= array[i + 1])
12098 			return i;
12099 	}
12100 
12101 	return (CDP_DELAY_BUCKET_MAX - 1);
12102 }
12103 
12104 /**
12105  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
12106  *				type of delay
12107  *
12108  * @pdev: pdev handle
12109  * @delay: delay in ms
12110  * @tid: tid value
12111  * @mode: type of tx delay mode
12112  * @ring_id: ring number
12113  * Return: pointer to cdp_delay_stats structure
12114  */
12115 static struct cdp_delay_stats *
12116 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
12117 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
12118 {
12119 	uint8_t delay_index = 0;
12120 	struct cdp_tid_tx_stats *tstats =
12121 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
12122 	struct cdp_tid_rx_stats *rstats =
12123 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
12124 	/*
12125 	 * cdp_fw_to_hw_delay_range
12126 	 * Fw to hw delay ranges in milliseconds
12127 	 */
12128 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
12129 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
12130 
12131 	/*
12132 	 * cdp_sw_enq_delay_range
12133 	 * Software enqueue delay ranges in milliseconds
12134 	 */
12135 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
12136 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
12137 
12138 	/*
12139 	 * cdp_intfrm_delay_range
12140 	 * Interframe delay ranges in milliseconds
12141 	 */
12142 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
12143 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
12144 
12145 	/*
12146 	 * Update delay stats in proper bucket
12147 	 */
12148 	switch (mode) {
12149 	/* Software Enqueue delay ranges */
12150 	case CDP_DELAY_STATS_SW_ENQ:
12151 
12152 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
12153 		tstats->swq_delay.delay_bucket[delay_index]++;
12154 		return &tstats->swq_delay;
12155 
12156 	/* Tx Completion delay ranges */
12157 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
12158 
12159 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
12160 		tstats->hwtx_delay.delay_bucket[delay_index]++;
12161 		return &tstats->hwtx_delay;
12162 
12163 	/* Interframe tx delay ranges */
12164 	case CDP_DELAY_STATS_TX_INTERFRAME:
12165 
12166 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12167 		tstats->intfrm_delay.delay_bucket[delay_index]++;
12168 		return &tstats->intfrm_delay;
12169 
12170 	/* Interframe rx delay ranges */
12171 	case CDP_DELAY_STATS_RX_INTERFRAME:
12172 
12173 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12174 		rstats->intfrm_delay.delay_bucket[delay_index]++;
12175 		return &rstats->intfrm_delay;
12176 
12177 	/* Ring reap to indication to network stack */
12178 	case CDP_DELAY_STATS_REAP_STACK:
12179 
12180 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12181 		rstats->to_stack_delay.delay_bucket[delay_index]++;
12182 		return &rstats->to_stack_delay;
12183 	default:
12184 		dp_debug("Incorrect delay mode: %d", mode);
12185 	}
12186 
12187 	return NULL;
12188 }
12189 
12190 /**
12191  * dp_update_delay_stats() - Update delay statistics in structure
12192  *				and fill min, max and avg delay
12193  *
12194  * @pdev: pdev handle
12195  * @delay: delay in ms
12196  * @tid: tid value
12197  * @mode: type of tx delay mode
12198  * @ring id: ring number
12199  * Return: none
12200  */
12201 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
12202 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
12203 {
12204 	struct cdp_delay_stats *dstats = NULL;
12205 
12206 	/*
12207 	 * Delay ranges are different for different delay modes
12208 	 * Get the correct index to update delay bucket
12209 	 */
12210 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
12211 	if (qdf_unlikely(!dstats))
12212 		return;
12213 
12214 	if (delay != 0) {
12215 		/*
12216 		 * Compute minimum,average and maximum
12217 		 * delay
12218 		 */
12219 		if (delay < dstats->min_delay)
12220 			dstats->min_delay = delay;
12221 
12222 		if (delay > dstats->max_delay)
12223 			dstats->max_delay = delay;
12224 
12225 		/*
12226 		 * Average over delay measured till now
12227 		 */
12228 		if (!dstats->avg_delay)
12229 			dstats->avg_delay = delay;
12230 		else
12231 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
12232 	}
12233 }
12234 
12235 /**
12236  * dp_get_peer_mac_list(): function to get peer mac list of vdev
12237  * @soc: Datapath soc handle
12238  * @vdev_id: vdev id
12239  * @newmac: Table of the clients mac
12240  * @mac_cnt: No. of MACs required
12241  * @limit: Limit the number of clients
12242  *
12243  * return: no of clients
12244  */
12245 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
12246 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
12247 			      u_int16_t mac_cnt, bool limit)
12248 {
12249 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
12250 	struct dp_vdev *vdev =
12251 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
12252 	struct dp_peer *peer;
12253 	uint16_t new_mac_cnt = 0;
12254 
12255 	if (!vdev)
12256 		return new_mac_cnt;
12257 
12258 	if (limit && (vdev->num_peers > mac_cnt))
12259 		return 0;
12260 
12261 	qdf_spin_lock_bh(&vdev->peer_list_lock);
12262 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
12263 		if (peer->bss_peer)
12264 			continue;
12265 		if (new_mac_cnt < mac_cnt) {
12266 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
12267 			new_mac_cnt++;
12268 		}
12269 	}
12270 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
12271 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
12272 	return new_mac_cnt;
12273 }
12274 
12275 #ifdef QCA_SUPPORT_WDS_EXTENDED
12276 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
12277 				uint8_t vdev_id,
12278 				uint8_t *mac)
12279 {
12280 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
12281 						       mac, 0, vdev_id,
12282 						       DP_MOD_ID_CDP);
12283 	uint16_t peer_id = HTT_INVALID_PEER;
12284 
12285 	if (!peer) {
12286 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
12287 		return peer_id;
12288 	}
12289 
12290 	peer_id = peer->peer_id;
12291 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12292 	return peer_id;
12293 }
12294 
12295 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
12296 				  uint8_t vdev_id,
12297 				  uint8_t *mac,
12298 				  ol_txrx_rx_fp rx,
12299 				  ol_osif_peer_handle osif_peer)
12300 {
12301 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
12302 						       mac, 0, vdev_id,
12303 						       DP_MOD_ID_CDP);
12304 	QDF_STATUS status = QDF_STATUS_E_INVAL;
12305 
12306 	if (!peer) {
12307 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
12308 		return status;
12309 	}
12310 
12311 	if (rx) {
12312 		if (peer->osif_rx) {
12313 		    status = QDF_STATUS_E_ALREADY;
12314 		} else {
12315 		    peer->osif_rx = rx;
12316 		    status = QDF_STATUS_SUCCESS;
12317 		}
12318 	} else {
12319 		if (peer->osif_rx) {
12320 		    peer->osif_rx = NULL;
12321 		    status = QDF_STATUS_SUCCESS;
12322 		} else {
12323 		    status = QDF_STATUS_E_ALREADY;
12324 		}
12325 	}
12326 
12327 	peer->wds_ext.osif_peer = osif_peer;
12328 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12329 
12330 	return status;
12331 }
12332 #endif /* QCA_SUPPORT_WDS_EXTENDED */
12333 
12334 /**
12335  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
12336  *			   monitor rings
12337  * @pdev: Datapath pdev handle
12338  *
12339  */
12340 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
12341 {
12342 	struct dp_soc *soc = pdev->soc;
12343 	uint8_t i;
12344 
12345 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
12346 		       pdev->lmac_id);
12347 
12348 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12349 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12350 		dp_ipa_deinit_alt_tx_ring(soc);
12351 	}
12352 
12353 	if (!soc->rxdma2sw_rings_not_supported) {
12354 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12355 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12356 								 pdev->pdev_id);
12357 
12358 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
12359 							base_vaddr_unaligned,
12360 					     soc->rxdma_err_dst_ring[lmac_id].
12361 								alloc_size,
12362 					     soc->ctrl_psoc,
12363 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12364 					     "rxdma_err_dst");
12365 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
12366 				       RXDMA_DST, lmac_id);
12367 		}
12368 	}
12369 
12370 
12371 }
12372 
12373 /**
12374  * dp_pdev_srng_init() - initialize all pdev srng rings including
12375  *			   monitor rings
12376  * @pdev: Datapath pdev handle
12377  *
12378  * return: QDF_STATUS_SUCCESS on success
12379  *	   QDF_STATUS_E_NOMEM on failure
12380  */
12381 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
12382 {
12383 	struct dp_soc *soc = pdev->soc;
12384 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12385 	uint32_t i;
12386 
12387 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12388 
12389 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12390 			 RXDMA_BUF, 0, pdev->lmac_id)) {
12391 		dp_init_err("%pK: dp_srng_init failed rx refill ring", soc);
12392 		goto fail1;
12393 	}
12394 
12395 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12396 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12397 			goto fail1;
12398 
12399 		if (dp_ipa_init_alt_tx_ring(soc))
12400 			goto fail1;
12401 	}
12402 
12403 	/* LMAC RxDMA to SW Rings configuration */
12404 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12405 		/* Only valid for MCL */
12406 		pdev = soc->pdev_list[0];
12407 
12408 	if (!soc->rxdma2sw_rings_not_supported) {
12409 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12410 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12411 								 pdev->pdev_id);
12412 			struct dp_srng *srng =
12413 				&soc->rxdma_err_dst_ring[lmac_id];
12414 
12415 			if (srng->hal_srng)
12416 				continue;
12417 
12418 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
12419 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
12420 					    soc);
12421 				goto fail1;
12422 			}
12423 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
12424 						base_vaddr_unaligned,
12425 					  soc->rxdma_err_dst_ring[lmac_id].
12426 						alloc_size,
12427 					  soc->ctrl_psoc,
12428 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12429 					  "rxdma_err_dst");
12430 		}
12431 	}
12432 	return QDF_STATUS_SUCCESS;
12433 
12434 fail1:
12435 	dp_pdev_srng_deinit(pdev);
12436 	return QDF_STATUS_E_NOMEM;
12437 }
12438 
12439 /**
12440  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
12441  * pdev: Datapath pdev handle
12442  *
12443  */
12444 static void dp_pdev_srng_free(struct dp_pdev *pdev)
12445 {
12446 	struct dp_soc *soc = pdev->soc;
12447 	uint8_t i;
12448 
12449 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
12450 
12451 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12452 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12453 		dp_ipa_free_alt_tx_ring(soc);
12454 	}
12455 
12456 	if (!soc->rxdma2sw_rings_not_supported) {
12457 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12458 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12459 								 pdev->pdev_id);
12460 
12461 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
12462 		}
12463 	}
12464 }
12465 
12466 /**
12467  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
12468  *			  monitor rings
12469  * pdev: Datapath pdev handle
12470  *
12471  * return: QDF_STATUS_SUCCESS on success
12472  *	   QDF_STATUS_E_NOMEM on failure
12473  */
12474 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
12475 {
12476 	struct dp_soc *soc = pdev->soc;
12477 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12478 	uint32_t ring_size;
12479 	uint32_t i;
12480 
12481 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12482 
12483 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
12484 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12485 			  RXDMA_BUF, ring_size, 0)) {
12486 		dp_init_err("%pK: dp_srng_alloc failed rx refill ring", soc);
12487 		goto fail1;
12488 	}
12489 
12490 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12491 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12492 			goto fail1;
12493 
12494 		if (dp_ipa_alloc_alt_tx_ring(soc))
12495 			goto fail1;
12496 	}
12497 
12498 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
12499 	/* LMAC RxDMA to SW Rings configuration */
12500 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12501 		/* Only valid for MCL */
12502 		pdev = soc->pdev_list[0];
12503 
12504 	if (!soc->rxdma2sw_rings_not_supported) {
12505 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12506 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12507 								 pdev->pdev_id);
12508 			struct dp_srng *srng =
12509 				&soc->rxdma_err_dst_ring[lmac_id];
12510 
12511 			if (srng->base_vaddr_unaligned)
12512 				continue;
12513 
12514 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
12515 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
12516 					    soc);
12517 				goto fail1;
12518 			}
12519 		}
12520 	}
12521 
12522 	return QDF_STATUS_SUCCESS;
12523 fail1:
12524 	dp_pdev_srng_free(pdev);
12525 	return QDF_STATUS_E_NOMEM;
12526 }
12527 
12528 /**
12529  * dp_soc_srng_deinit() - de-initialize soc srng rings
12530  * @soc: Datapath soc handle
12531  *
12532  */
12533 static void dp_soc_srng_deinit(struct dp_soc *soc)
12534 {
12535 	uint32_t i;
12536 
12537 	if (soc->arch_ops.txrx_soc_srng_deinit)
12538 		soc->arch_ops.txrx_soc_srng_deinit(soc);
12539 
12540 	/* Free the ring memories */
12541 	/* Common rings */
12542 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12543 			     soc->wbm_desc_rel_ring.alloc_size,
12544 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
12545 			     "wbm_desc_rel_ring");
12546 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
12547 
12548 	/* Tx data rings */
12549 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12550 		dp_deinit_tx_pair_by_index(soc, i);
12551 
12552 	/* TCL command and status rings */
12553 	if (soc->init_tcl_cmd_cred_ring) {
12554 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12555 				     soc->tcl_cmd_credit_ring.alloc_size,
12556 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
12557 				     "wbm_desc_rel_ring");
12558 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
12559 			       TCL_CMD_CREDIT, 0);
12560 	}
12561 
12562 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
12563 			     soc->tcl_status_ring.alloc_size,
12564 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
12565 			     "wbm_desc_rel_ring");
12566 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
12567 
12568 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12569 		/* TODO: Get number of rings and ring sizes
12570 		 * from wlan_cfg
12571 		 */
12572 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
12573 				     soc->reo_dest_ring[i].alloc_size,
12574 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
12575 				     "reo_dest_ring");
12576 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
12577 	}
12578 
12579 	/* REO reinjection ring */
12580 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
12581 			     soc->reo_reinject_ring.alloc_size,
12582 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
12583 			     "reo_reinject_ring");
12584 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
12585 
12586 	/* Rx release ring */
12587 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
12588 			     soc->rx_rel_ring.alloc_size,
12589 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
12590 			     "reo_release_ring");
12591 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
12592 
12593 	/* Rx exception ring */
12594 	/* TODO: Better to store ring_type and ring_num in
12595 	 * dp_srng during setup
12596 	 */
12597 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
12598 			     soc->reo_exception_ring.alloc_size,
12599 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
12600 			     "reo_exception_ring");
12601 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
12602 
12603 	/* REO command and status rings */
12604 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
12605 			     soc->reo_cmd_ring.alloc_size,
12606 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
12607 			     "reo_cmd_ring");
12608 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
12609 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
12610 			     soc->reo_status_ring.alloc_size,
12611 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
12612 			     "reo_status_ring");
12613 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
12614 }
12615 
12616 /**
12617  * dp_soc_srng_init() - Initialize soc level srng rings
12618  * @soc: Datapath soc handle
12619  *
12620  * return: QDF_STATUS_SUCCESS on success
12621  *	   QDF_STATUS_E_FAILURE on failure
12622  */
12623 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
12624 {
12625 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12626 	uint8_t i;
12627 
12628 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12629 
12630 	dp_enable_verbose_debug(soc);
12631 
12632 	/* WBM descriptor release ring */
12633 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
12634 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
12635 		goto fail1;
12636 	}
12637 
12638 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12639 			  soc->wbm_desc_rel_ring.alloc_size,
12640 			  soc->ctrl_psoc,
12641 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
12642 			  "wbm_desc_rel_ring");
12643 
12644 	if (soc->init_tcl_cmd_cred_ring) {
12645 		/* TCL command and status rings */
12646 		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
12647 				 TCL_CMD_CREDIT, 0, 0)) {
12648 			dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
12649 			goto fail1;
12650 		}
12651 
12652 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12653 				  soc->tcl_cmd_credit_ring.alloc_size,
12654 				  soc->ctrl_psoc,
12655 				  WLAN_MD_DP_SRNG_TCL_CMD,
12656 				  "wbm_desc_rel_ring");
12657 	}
12658 
12659 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
12660 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
12661 		goto fail1;
12662 	}
12663 
12664 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
12665 			  soc->tcl_status_ring.alloc_size,
12666 			  soc->ctrl_psoc,
12667 			  WLAN_MD_DP_SRNG_TCL_STATUS,
12668 			  "wbm_desc_rel_ring");
12669 
12670 	/* REO reinjection ring */
12671 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
12672 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
12673 		goto fail1;
12674 	}
12675 
12676 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
12677 			  soc->reo_reinject_ring.alloc_size,
12678 			  soc->ctrl_psoc,
12679 			  WLAN_MD_DP_SRNG_REO_REINJECT,
12680 			  "reo_reinject_ring");
12681 
12682 	/* Rx release ring */
12683 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12684 			 WBM2SW_REL_ERR_RING_NUM, 0)) {
12685 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
12686 		goto fail1;
12687 	}
12688 
12689 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
12690 			  soc->rx_rel_ring.alloc_size,
12691 			  soc->ctrl_psoc,
12692 			  WLAN_MD_DP_SRNG_RX_REL,
12693 			  "reo_release_ring");
12694 
12695 	/* Rx exception ring */
12696 	if (dp_srng_init(soc, &soc->reo_exception_ring,
12697 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
12698 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
12699 		goto fail1;
12700 	}
12701 
12702 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
12703 			  soc->reo_exception_ring.alloc_size,
12704 			  soc->ctrl_psoc,
12705 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
12706 			  "reo_exception_ring");
12707 
12708 	/* REO command and status rings */
12709 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
12710 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
12711 		goto fail1;
12712 	}
12713 
12714 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
12715 			  soc->reo_cmd_ring.alloc_size,
12716 			  soc->ctrl_psoc,
12717 			  WLAN_MD_DP_SRNG_REO_CMD,
12718 			  "reo_cmd_ring");
12719 
12720 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
12721 	TAILQ_INIT(&soc->rx.reo_cmd_list);
12722 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
12723 
12724 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
12725 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
12726 		goto fail1;
12727 	}
12728 
12729 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
12730 			  soc->reo_status_ring.alloc_size,
12731 			  soc->ctrl_psoc,
12732 			  WLAN_MD_DP_SRNG_REO_STATUS,
12733 			  "reo_status_ring");
12734 
12735 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12736 		if (dp_init_tx_ring_pair_by_index(soc, i))
12737 			goto fail1;
12738 	}
12739 
12740 	dp_create_ext_stats_event(soc);
12741 
12742 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12743 		/* Initialize REO destination ring */
12744 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
12745 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
12746 			goto fail1;
12747 		}
12748 
12749 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
12750 				  soc->reo_dest_ring[i].alloc_size,
12751 				  soc->ctrl_psoc,
12752 				  WLAN_MD_DP_SRNG_REO_DEST,
12753 				  "reo_dest_ring");
12754 	}
12755 
12756 	if (soc->arch_ops.txrx_soc_srng_init) {
12757 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
12758 			dp_init_err("%pK: dp_srng_init failed for arch rings",
12759 				    soc);
12760 			goto fail1;
12761 		}
12762 	}
12763 
12764 	return QDF_STATUS_SUCCESS;
12765 fail1:
12766 	/*
12767 	 * Cleanup will be done as part of soc_detach, which will
12768 	 * be called on pdev attach failure
12769 	 */
12770 	dp_soc_srng_deinit(soc);
12771 	return QDF_STATUS_E_FAILURE;
12772 }
12773 
12774 /**
12775  * dp_soc_srng_free() - free soc level srng rings
12776  * @soc: Datapath soc handle
12777  *
12778  */
12779 static void dp_soc_srng_free(struct dp_soc *soc)
12780 {
12781 	uint32_t i;
12782 
12783 	if (soc->arch_ops.txrx_soc_srng_free)
12784 		soc->arch_ops.txrx_soc_srng_free(soc);
12785 
12786 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
12787 
12788 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12789 		dp_free_tx_ring_pair_by_index(soc, i);
12790 
12791 	if (soc->init_tcl_cmd_cred_ring)
12792 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
12793 
12794 	dp_srng_free(soc, &soc->tcl_status_ring);
12795 
12796 	for (i = 0; i < soc->num_reo_dest_rings; i++)
12797 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
12798 
12799 	dp_srng_free(soc, &soc->reo_reinject_ring);
12800 	dp_srng_free(soc, &soc->rx_rel_ring);
12801 
12802 	dp_srng_free(soc, &soc->reo_exception_ring);
12803 
12804 	dp_srng_free(soc, &soc->reo_cmd_ring);
12805 	dp_srng_free(soc, &soc->reo_status_ring);
12806 }
12807 
12808 /**
12809  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
12810  * @soc: Datapath soc handle
12811  *
12812  * return: QDF_STATUS_SUCCESS on success
12813  *	   QDF_STATUS_E_NOMEM on failure
12814  */
12815 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
12816 {
12817 	uint32_t entries;
12818 	uint32_t i;
12819 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12820 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
12821 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
12822 
12823 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12824 
12825 	/* sw2wbm link descriptor release ring */
12826 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
12827 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
12828 			  entries, 0)) {
12829 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
12830 		goto fail1;
12831 	}
12832 
12833 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
12834 	/* TCL command and status rings */
12835 	if (soc->init_tcl_cmd_cred_ring) {
12836 		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
12837 				  TCL_CMD_CREDIT, entries, 0)) {
12838 			dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
12839 			goto fail1;
12840 		}
12841 	}
12842 
12843 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
12844 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
12845 			  0)) {
12846 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
12847 		goto fail1;
12848 	}
12849 
12850 	/* REO reinjection ring */
12851 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
12852 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
12853 			  entries, 0)) {
12854 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
12855 		goto fail1;
12856 	}
12857 
12858 	/* Rx release ring */
12859 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
12860 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12861 			  entries, 0)) {
12862 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
12863 		goto fail1;
12864 	}
12865 
12866 	/* Rx exception ring */
12867 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12868 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12869 			  entries, 0)) {
12870 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
12871 		goto fail1;
12872 	}
12873 
12874 	/* REO command and status rings */
12875 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12876 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12877 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
12878 		goto fail1;
12879 	}
12880 
12881 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12882 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12883 			  entries, 0)) {
12884 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
12885 		goto fail1;
12886 	}
12887 
12888 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
12889 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
12890 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12891 
12892 	/* Disable cached desc if NSS offload is enabled */
12893 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12894 		cached = 0;
12895 
12896 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12897 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12898 			goto fail1;
12899 	}
12900 
12901 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12902 		/* Setup REO destination ring */
12903 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12904 				  reo_dst_ring_size, cached)) {
12905 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
12906 			goto fail1;
12907 		}
12908 	}
12909 
12910 	if (soc->arch_ops.txrx_soc_srng_alloc) {
12911 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
12912 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
12913 				    soc);
12914 			goto fail1;
12915 		}
12916 	}
12917 
12918 	return QDF_STATUS_SUCCESS;
12919 
12920 fail1:
12921 	dp_soc_srng_free(soc);
12922 	return QDF_STATUS_E_NOMEM;
12923 }
12924 
12925 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
12926 {
12927 	dp_init_info("DP soc Dump for Target = %d", target_type);
12928 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
12929 		     soc->ast_override_support, soc->da_war_enabled);
12930 
12931 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
12932 }
12933 
12934 /**
12935  * dp_soc_cfg_init() - initialize target specific configuration
12936  *		       during dp_soc_init
12937  * @soc: dp soc handle
12938  */
12939 static void dp_soc_cfg_init(struct dp_soc *soc)
12940 {
12941 	uint32_t target_type;
12942 
12943 	target_type = hal_get_target_type(soc->hal_soc);
12944 	switch (target_type) {
12945 	case TARGET_TYPE_QCA6290:
12946 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12947 					       REO_DST_RING_SIZE_QCA6290);
12948 		soc->ast_override_support = 1;
12949 		soc->da_war_enabled = false;
12950 		break;
12951 	case TARGET_TYPE_QCA6390:
12952 	case TARGET_TYPE_QCA6490:
12953 	case TARGET_TYPE_QCA6750:
12954 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12955 					       REO_DST_RING_SIZE_QCA6290);
12956 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12957 		soc->ast_override_support = 1;
12958 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12959 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12960 		    QDF_GLOBAL_MONITOR_MODE) {
12961 			int int_ctx;
12962 
12963 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
12964 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12965 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12966 			}
12967 		}
12968 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12969 		break;
12970 	case TARGET_TYPE_WCN7850:
12971 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12972 					       REO_DST_RING_SIZE_QCA6290);
12973 		soc->ast_override_support = 1;
12974 
12975 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12976 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12977 		    QDF_GLOBAL_MONITOR_MODE) {
12978 			int int_ctx;
12979 
12980 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
12981 			     int_ctx++) {
12982 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12983 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12984 			}
12985 		}
12986 
12987 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12988 		break;
12989 	case TARGET_TYPE_QCA8074:
12990 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12991 					       REO_DST_RING_SIZE_QCA8074);
12992 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12993 		soc->da_war_enabled = true;
12994 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12995 		break;
12996 	case TARGET_TYPE_QCA8074V2:
12997 	case TARGET_TYPE_QCA6018:
12998 	case TARGET_TYPE_QCA9574:
12999 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13000 					       REO_DST_RING_SIZE_QCA8074);
13001 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13002 		soc->ast_override_support = 1;
13003 		soc->per_tid_basize_max_tid = 8;
13004 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
13005 		soc->da_war_enabled = false;
13006 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
13007 		break;
13008 	case TARGET_TYPE_QCN9000:
13009 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13010 					       REO_DST_RING_SIZE_QCN9000);
13011 		soc->ast_override_support = 1;
13012 		soc->da_war_enabled = false;
13013 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13014 		soc->per_tid_basize_max_tid = 8;
13015 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
13016 		soc->lmac_polled_mode = 0;
13017 		soc->wbm_release_desc_rx_sg_support = 1;
13018 		break;
13019 	case TARGET_TYPE_QCA5018:
13020 	case TARGET_TYPE_QCN6122:
13021 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13022 					       REO_DST_RING_SIZE_QCA8074);
13023 		soc->ast_override_support = 1;
13024 		soc->da_war_enabled = false;
13025 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13026 		soc->per_tid_basize_max_tid = 8;
13027 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
13028 		soc->disable_mac1_intr = 1;
13029 		soc->disable_mac2_intr = 1;
13030 		soc->wbm_release_desc_rx_sg_support = 1;
13031 		break;
13032 	case TARGET_TYPE_QCN9224:
13033 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13034 					       REO_DST_RING_SIZE_QCA8074);
13035 		soc->ast_override_support = 1;
13036 		soc->da_war_enabled = false;
13037 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13038 		soc->per_tid_basize_max_tid = 8;
13039 		soc->wbm_release_desc_rx_sg_support = 1;
13040 		soc->rxdma2sw_rings_not_supported = 1;
13041 
13042 		break;
13043 	default:
13044 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
13045 		qdf_assert_always(0);
13046 		break;
13047 	}
13048 	dp_soc_cfg_dump(soc, target_type);
13049 }
13050 
13051 /**
13052  * dp_soc_cfg_attach() - set target specific configuration in
13053  *			 dp soc cfg.
13054  * @soc: dp soc handle
13055  */
13056 static void dp_soc_cfg_attach(struct dp_soc *soc)
13057 {
13058 	int target_type;
13059 	int nss_cfg = 0;
13060 
13061 	target_type = hal_get_target_type(soc->hal_soc);
13062 	switch (target_type) {
13063 	case TARGET_TYPE_QCA6290:
13064 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13065 					       REO_DST_RING_SIZE_QCA6290);
13066 		break;
13067 	case TARGET_TYPE_QCA6390:
13068 	case TARGET_TYPE_QCA6490:
13069 	case TARGET_TYPE_QCA6750:
13070 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13071 					       REO_DST_RING_SIZE_QCA6290);
13072 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13073 		break;
13074 	case TARGET_TYPE_WCN7850:
13075 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13076 					       REO_DST_RING_SIZE_QCA6290);
13077 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13078 		break;
13079 	case TARGET_TYPE_QCA8074:
13080 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13081 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13082 					       REO_DST_RING_SIZE_QCA8074);
13083 		break;
13084 	case TARGET_TYPE_QCA8074V2:
13085 	case TARGET_TYPE_QCA6018:
13086 	case TARGET_TYPE_QCA9574:
13087 	case TARGET_TYPE_QCN6122:
13088 	case TARGET_TYPE_QCA5018:
13089 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13090 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13091 					       REO_DST_RING_SIZE_QCA8074);
13092 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13093 		break;
13094 	case TARGET_TYPE_QCN9000:
13095 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13096 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13097 					       REO_DST_RING_SIZE_QCN9000);
13098 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13099 		break;
13100 	case TARGET_TYPE_QCN9224:
13101 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13102 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13103 					       REO_DST_RING_SIZE_QCA8074);
13104 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13105 		break;
13106 	default:
13107 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
13108 		qdf_assert_always(0);
13109 		break;
13110 	}
13111 
13112 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
13113 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
13114 
13115 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
13116 
13117 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
13118 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
13119 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
13120 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
13121 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
13122 		soc->init_tcl_cmd_cred_ring = false;
13123 		soc->num_tcl_data_rings =
13124 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
13125 		soc->num_reo_dest_rings =
13126 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
13127 
13128 	} else {
13129 		soc->init_tcl_cmd_cred_ring = true;
13130 		soc->num_tcl_data_rings =
13131 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
13132 		soc->num_reo_dest_rings =
13133 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
13134 	}
13135 
13136 	soc->arch_ops.soc_cfg_attach(soc);
13137 }
13138 
13139 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
13140 {
13141 	struct dp_soc *soc = pdev->soc;
13142 
13143 	switch (pdev->pdev_id) {
13144 	case 0:
13145 		pdev->reo_dest =
13146 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
13147 		break;
13148 
13149 	case 1:
13150 		pdev->reo_dest =
13151 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
13152 		break;
13153 
13154 	case 2:
13155 		pdev->reo_dest =
13156 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
13157 		break;
13158 
13159 	default:
13160 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
13161 			    soc, pdev->pdev_id);
13162 		break;
13163 	}
13164 }
13165 
/**
 * dp_pdev_init() - initialize a previously attached txrx pdev
 * @txrx_soc: opaque dp soc handle (cast to struct dp_soc internally)
 * @htc_handle: HTC handle for host-target interface (not used in this body)
 * @qdf_osdev: QDF OS device (not used in this body)
 * @pdev_id: index into soc->pdev_list of the pdev to initialize
 *
 * Brings up the pdev in strict acquisition order: WDI events, srng
 * rings, tx path, invalid-peer buffer, locks/stats, sojourn buffer,
 * RXDMA rings, IPA resources, RX FST, backpressure stats, monitor,
 * and finally rx descriptor pools and buffers. On any failure the
 * goto ladder at the bottom releases everything acquired so far, in
 * reverse order.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
				      HTC_HANDLE htc_handle,
				      qdf_device_t qdf_osdev,
				      uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;
	QDF_STATUS ret;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail0;
	}

	if (dp_pdev_srng_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
		goto fail1;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
		dp_ipa_hal_tx_init_alt_data_ring(soc);
	}

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);

	dp_tx_pdev_init(pdev);
	/*
	 * Allocate a scratch dp_peer object.
	 * NOTE(review): presumably used by the rx path for frames whose
	 * peer cannot be resolved - confirm against rx error handling.
	 */
	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));

	if (!pdev->invalid_peer) {
		dp_init_err("%pK: Invalid peer memory allocation failed", soc);
		goto fail2;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	/* PHYB 2G mode uses a fixed target pdev id */
	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* vdev list and its protecting lock */
	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	/* no lmac assigned to any band until mapped later */
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	/* nbuf shared with the WDI consumers of sojourn stats */
	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
			      TRUE);

	if (!pdev->sojourn_buf) {
		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
		goto fail3;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	qdf_event_create(&pdev->fw_peer_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail4;
	}

	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail5;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail6;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail6;
	}

	/* E_NOSUPPORT means FST is compiled out / not applicable - not fatal */
	ret = dp_rx_fst_attach(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) &&
	    (ret != QDF_STATUS_E_NOSUPPORT)) {
		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
			    soc, pdev_id, ret);
		goto fail7;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail8;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
		goto fail9;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* unwind ladder: release resources in reverse acquisition order */
fail9:
	dp_pdev_bkp_stats_detach(pdev);
fail8:
	dp_rx_fst_detach(soc, pdev);
fail7:
	dp_ipa_uc_detach(soc, pdev);
fail6:
	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
fail5:
	dp_rxdma_ring_cleanup(soc, pdev);
fail4:
	qdf_nbuf_free(pdev->sojourn_buf);
fail3:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	qdf_mem_free(pdev->invalid_peer);
fail2:
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}
13354 
13355 /*
13356  * dp_pdev_init_wifi3() - Init txrx pdev
13357  * @htc_handle: HTC handle for host-target interface
13358  * @qdf_osdev: QDF OS device
13359  * @force: Force deinit
13360  *
13361  * Return: QDF_STATUS
13362  */
13363 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
13364 				     HTC_HANDLE htc_handle,
13365 				     qdf_device_t qdf_osdev,
13366 				     uint8_t pdev_id)
13367 {
13368 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
13369 }
13370 
13371